Lines matching references to p_vf in the qed driver's PF-side SR-IOV code (drivers/net/ethernet/qlogic/qed/qed_sriov.c). Each entry gives the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks lines where p_vf is declared as a function parameter or local variable.

53 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) in qed_vf_calculate_legacy() argument
57 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_vf_calculate_legacy()
61 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_vf_calculate_legacy()
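The listing opens with qed_vf_calculate_legacy(), which derives a legacy-behaviour bitmask from the ACQUIRE request the VF sent earlier: one bit when the VF's fastpath HSI minor predates the packet-length/tunnelling change (line 57), another when the VF never advertised the queue-qids capability (line 61). A minimal standalone sketch of that shape; all *_SKETCH names are illustrative, not the driver's constants:

```c
#include <stdint.h>
#include <stdio.h>

#define CAP_QUEUE_QIDS_SKETCH  (1u << 0)  /* VF can address queues via qid TLVs */
#define LEGACY_RX_PROD_SKETCH  (1u << 0)  /* old fastpath HSI: legacy Rx producer */
#define LEGACY_CID_SKETCH      (1u << 1)  /* no qid capability: legacy CID scheme */

struct acquire_info_sketch {
	uint8_t  eth_fp_hsi_minor;
	uint32_t capabilities;
};

/* Mirrors the two tests at lines 57 and 61 of the listing. */
static uint8_t vf_calculate_legacy_sketch(const struct acquire_info_sketch *acq,
					  uint8_t no_pkt_len_tunn_minor)
{
	uint8_t legacy = 0;

	if (acq->eth_fp_hsi_minor == no_pkt_len_tunn_minor)
		legacy |= LEGACY_RX_PROD_SKETCH;

	if (!(acq->capabilities & CAP_QUEUE_QIDS_SKETCH))
		legacy |= LEGACY_CID_SKETCH;

	return legacy;
}

int main(void)
{
	struct acquire_info_sketch old_vf = { 0, 0 };

	printf("legacy mask = %#x\n",
	       (unsigned int)vf_calculate_legacy_sketch(&old_vf, 0));
	return 0;
}
```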
69 static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) in qed_sp_vf_start() argument
80 init_data.opaque_fid = p_vf->opaque_fid; in qed_sp_vf_start()
91 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); in qed_sp_vf_start()
92 p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); in qed_sp_vf_start()
107 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; in qed_sp_vf_start()
113 p_vf->abs_vf_id, in qed_sp_vf_start()
124 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); in qed_sp_vf_start()
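qed_sp_vf_start() posts the VF_START ramrod: the VFID is extracted from the concrete FID (line 91) and, per the messages around lines 107-124, the fastpath HSI minor the VF requested is validated against what the PF supports. A compile-ready sketch of that negotiation, assuming (as the warnings suggest) that an over-new request is clamped to the PF's own minor:

```c
#include <stdio.h>

#define PF_HSI_MINOR_SKETCH 10u  /* illustrative stand-in for ETH_HSI_VER_MINOR */

static unsigned int negotiate_fp_minor_sketch(unsigned int vf_requested)
{
	if (vf_requested > PF_HSI_MINOR_SKETCH) {
		printf("VF requested minor %u, PF supports %u; clamping\n",
		       vf_requested, PF_HSI_MINOR_SKETCH);
		return PF_HSI_MINOR_SKETCH;
	}
	return vf_requested;
}

int main(void)
{
	printf("negotiated minor = %u\n", negotiate_fp_minor_sketch(12));
	return 0;
}
```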
221 struct qed_vf_info *p_vf, in qed_iov_validate_queue_mode() argument
234 p_qcid = &p_vf->vf_queues[qid].cids[i]; in qed_iov_validate_queue_mode()
250 struct qed_vf_info *p_vf, in qed_iov_validate_rxq() argument
254 if (rx_qid >= p_vf->num_rxqs) { in qed_iov_validate_rxq()
258 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); in qed_iov_validate_rxq()
262 return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); in qed_iov_validate_rxq()
266 struct qed_vf_info *p_vf, in qed_iov_validate_txq() argument
270 if (tx_qid >= p_vf->num_txqs) { in qed_iov_validate_txq()
274 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); in qed_iov_validate_txq()
278 return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); in qed_iov_validate_txq()
282 struct qed_vf_info *p_vf, u16 sb_idx) in qed_iov_validate_sb() argument
286 for (i = 0; i < p_vf->num_sbs; i++) in qed_iov_validate_sb()
287 if (p_vf->igu_sbs[i] == sb_idx) in qed_iov_validate_sb()
293 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); in qed_iov_validate_sb()
299 struct qed_vf_info *p_vf) in qed_iov_validate_active_rxq() argument
303 for (i = 0; i < p_vf->num_rxqs; i++) in qed_iov_validate_active_rxq()
304 if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, in qed_iov_validate_active_rxq()
313 struct qed_vf_info *p_vf) in qed_iov_validate_active_txq() argument
317 for (i = 0; i < p_vf->num_txqs; i++) in qed_iov_validate_active_txq()
318 if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, in qed_iov_validate_active_txq()
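Lines 221-318 are the validation helpers. The pattern is uniform: bounds-check every queue or status-block index the VF hands over the mailbox before touching per-queue state (lines 254 and 270), and let the *_active_* variants scan the whole range for a queue still in a given mode. A standalone model of that pattern:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VF_QUEUES_SKETCH 16

struct vf_queues_sketch {
	uint8_t num_rxqs;
	bool    rx_active[MAX_VF_QUEUES_SKETCH];
};

/* Bounds check first, as qed_iov_validate_rxq() does at line 254. */
static bool validate_rxq_sketch(const struct vf_queues_sketch *vf, uint16_t qid)
{
	return qid < vf->num_rxqs;
}

/* Scan for any still-active Rx queue, like qed_iov_validate_active_rxq(). */
static bool any_active_rxq_sketch(const struct vf_queues_sketch *vf)
{
	for (uint8_t i = 0; i < vf->num_rxqs; i++)
		if (vf->rx_active[i])
			return true;
	return false;
}

int main(void)
{
	struct vf_queues_sketch vf = { .num_rxqs = 4 };

	printf("qid 7 valid: %d, any active: %d\n",
	       validate_rxq_sketch(&vf, 7), any_active_rxq_sketch(&vf));
	return 0;
}
```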
332 struct qed_vf_info *p_vf; in qed_iov_post_vf_bulletin() local
334 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); in qed_iov_post_vf_bulletin()
335 if (!p_vf) in qed_iov_post_vf_bulletin()
338 if (!p_vf->vf_bulletin) in qed_iov_post_vf_bulletin()
341 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_post_vf_bulletin()
346 p_vf->bulletin.size - crc_size); in qed_iov_post_vf_bulletin()
350 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); in qed_iov_post_vf_bulletin()
355 params.dst_vfid = p_vf->abs_vf_id; in qed_iov_post_vf_bulletin()
356 return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, in qed_iov_post_vf_bulletin()
357 p_vf->vf_bulletin, p_vf->bulletin.size / 4, in qed_iov_post_vf_bulletin()
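qed_iov_post_vf_bulletin() (lines 332-357) is the PF's publish path for the bulletin board: bump the version, checksum everything after the leading CRC field (line 346 passes the size minus crc_size), and copy the structure into the VF-visible buffer, which the driver does with a DMAE transaction of size/4 dwords (lines 356-357). A host-only model with memcpy() standing in for qed_dmae_host2host() and a plain bitwise CRC32 standing in for the kernel's crc32():

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct bulletin_sketch {
	uint32_t crc;      /* must stay first: excluded from its own checksum */
	uint32_t version;
	uint8_t  mac[6];
	/* ... link state, PVID, valid_bitmap would follow ... */
};

/* Plain reflected CRC32 (poly 0xEDB88320), standing in for crc32(). */
static uint32_t crc32_sketch(const uint8_t *p, size_t len)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & (0u - (crc & 1u)));
	}
	return ~crc;
}

/* Bump the version, checksum everything past the crc field, publish. */
static void post_bulletin_sketch(struct bulletin_sketch *b,
				 struct bulletin_sketch *vf_view)
{
	const size_t crc_size = sizeof(b->crc);

	b->version++;
	b->crc = crc32_sketch((const uint8_t *)b + crc_size,
			      sizeof(*b) - crc_size);
	memcpy(vf_view, b, sizeof(*b)); /* driver: DMAE copy, size / 4 dwords */
}

int main(void)
{
	struct bulletin_sketch pf = { .mac = { 0, 1, 2, 3, 4, 5 } }, vf;

	post_bulletin_sketch(&pf, &vf);
	return vf.version == 1 ? 0 : 1;
}
```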
784 struct qed_vf_info *p_vf; in qed_iov_enable_vf_access_msix() local
786 p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true); in qed_iov_enable_vf_access_msix()
787 if (!p_vf) in qed_iov_enable_vf_access_msix()
790 current_max = max_t(u8, current_max, p_vf->num_sbs); in qed_iov_enable_vf_access_msix()
960 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, in qed_iov_set_link() local
965 if (!p_vf) in qed_iov_set_link()
968 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_set_link()
1213 struct qed_vf_info *p_vf, in qed_iov_send_response() argument
1216 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_send_response()
1224 eng_vf_id = p_vf->abs_vf_id; in qed_iov_send_response()
1275 struct qed_vf_info *p_vf, in qed_iov_prep_vp_update_resp_tlvs() argument
1306 p_vf->relative_vf_id, in qed_iov_prep_vp_update_resp_tlvs()
1365 struct qed_vf_info *p_vf) in qed_iov_vf_cleanup() argument
1369 p_vf->vf_bulletin = 0; in qed_iov_vf_cleanup()
1370 p_vf->vport_instance = 0; in qed_iov_vf_cleanup()
1371 p_vf->configured_features = 0; in qed_iov_vf_cleanup()
1374 p_vf->num_rxqs = p_vf->num_sbs; in qed_iov_vf_cleanup()
1375 p_vf->num_txqs = p_vf->num_sbs; in qed_iov_vf_cleanup()
1377 p_vf->num_active_rxqs = 0; in qed_iov_vf_cleanup()
1380 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; in qed_iov_vf_cleanup()
1392 memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); in qed_iov_vf_cleanup()
1393 memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); in qed_iov_vf_cleanup()
1394 qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); in qed_iov_vf_cleanup()
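qed_iov_vf_cleanup() (lines 1365-1394) rewinds per-VF software state after release or FLR: the bulletin mapping, vport instance, and configured-features mask are zeroed, the usable queue counts fall back to the status-block allocation (lines 1374-1375), and the shadow config plus the cached ACQUIRE request are wiped. A sketch of the reset, with a byte array standing in for the real shadow structure:

```c
#include <stdint.h>
#include <string.h>

struct vf_state_sketch {
	uint64_t vf_bulletin;
	uint8_t  vport_instance;
	uint64_t configured_features;
	uint8_t  num_sbs, num_rxqs, num_txqs, num_active_rxqs;
	uint8_t  shadow_config[64];  /* stand-in for the real shadow struct */
};

/* Mirror of the resets at lines 1369-1393: queue counts fall back to
 * the SB allocation; everything negotiated or shadowed is forgotten. */
static void vf_cleanup_sketch(struct vf_state_sketch *vf)
{
	vf->vf_bulletin = 0;
	vf->vport_instance = 0;
	vf->configured_features = 0;
	vf->num_rxqs = vf->num_sbs;
	vf->num_txqs = vf->num_sbs;
	vf->num_active_rxqs = 0;
	memset(vf->shadow_config, 0, sizeof(vf->shadow_config));
}

int main(void)
{
	struct vf_state_sketch vf = { .num_sbs = 4 };

	vf_cleanup_sketch(&vf);
	return vf.num_rxqs == 4 ? 0 : 1;
}
```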
1411 struct qed_vf_info *p_vf, in qed_iov_vf_mbx_acquire_resc_cids() argument
1426 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_acquire_resc_cids()
1434 if (p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_acquire_resc_cids()
1453 struct qed_vf_info *p_vf, in qed_iov_vf_mbx_acquire_resc() argument
1460 p_resp->num_rxqs = p_vf->num_rxqs; in qed_iov_vf_mbx_acquire_resc()
1461 p_resp->num_txqs = p_vf->num_txqs; in qed_iov_vf_mbx_acquire_resc()
1462 p_resp->num_sbs = p_vf->num_sbs; in qed_iov_vf_mbx_acquire_resc()
1465 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; in qed_iov_vf_mbx_acquire_resc()
1473 qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, in qed_iov_vf_mbx_acquire_resc()
1479 p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, in qed_iov_vf_mbx_acquire_resc()
1481 p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, in qed_iov_vf_mbx_acquire_resc()
1484 qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); in qed_iov_vf_mbx_acquire_resc()
1502 p_vf->abs_vf_id, in qed_iov_vf_mbx_acquire_resc()
1520 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_iov_vf_mbx_acquire_resc()
1522 (p_vf->acquire.vfdev_info.os_type == in qed_iov_vf_mbx_acquire_resc()
1703 struct qed_vf_info *p_vf, bool val) in __qed_iov_spoofchk_set() argument
1708 if (val == p_vf->spoof_chk) { in __qed_iov_spoofchk_set()
1715 params.opaque_fid = p_vf->opaque_fid; in __qed_iov_spoofchk_set()
1716 params.vport_id = p_vf->vport_id; in __qed_iov_spoofchk_set()
1722 p_vf->spoof_chk = val; in __qed_iov_spoofchk_set()
1723 p_vf->req_spoofchk_val = p_vf->spoof_chk; in __qed_iov_spoofchk_set()
1729 val, p_vf->relative_vf_id); in __qed_iov_spoofchk_set()
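__qed_iov_spoofchk_set() (lines 1703-1729) is idempotent: a request matching the currently applied spoof-check value returns early (line 1708); otherwise a vport update is issued and both the applied and requested values are cached (lines 1722-1723). A sketch, with a stub standing in for the ramrod:

```c
#include <stdbool.h>

struct spoofchk_sketch {
	bool spoof_chk;        /* currently applied value */
	bool req_spoofchk_val; /* last requested value */
};

/* Stub for the vport-update ramrod the real code sends. */
static int send_vport_update_sketch(bool val) { (void)val; return 0; }

/* Early-out on a no-op, then apply and cache, as at lines 1708-1723. */
static int spoofchk_set_sketch(struct spoofchk_sketch *vf, bool val)
{
	int rc;

	if (val == vf->spoof_chk)
		return 0;      /* already in the requested state */

	rc = send_vport_update_sketch(val);
	if (!rc) {
		vf->spoof_chk = val;
		vf->req_spoofchk_val = val;
	}
	return rc;
}

int main(void)
{
	struct spoofchk_sketch vf = { false, false };

	return spoofchk_set_sketch(&vf, true);
}
```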
1736 struct qed_vf_info *p_vf) in qed_iov_reconfigure_unicast_vlan() argument
1745 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_reconfigure_unicast_vlan()
1750 if (!p_vf->shadow_config.vlans[i].used) in qed_iov_reconfigure_unicast_vlan()
1754 filter.vlan = p_vf->shadow_config.vlans[i].vid; in qed_iov_reconfigure_unicast_vlan()
1757 filter.vlan, p_vf->relative_vf_id); in qed_iov_reconfigure_unicast_vlan()
1758 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_reconfigure_unicast_vlan()
1763 filter.vlan, p_vf->relative_vf_id); in qed_iov_reconfigure_unicast_vlan()
1773 struct qed_vf_info *p_vf, u64 events) in qed_iov_reconfigure_unicast_shadow() argument
1778 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) in qed_iov_reconfigure_unicast_shadow()
1779 rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); in qed_iov_reconfigure_unicast_shadow()
1785 struct qed_vf_info *p_vf, u64 events) in qed_iov_configure_vport_forced() argument
1790 if (!p_vf->vport_instance) in qed_iov_configure_vport_forced()
1794 p_vf->p_vf_info.is_trusted_configured) { in qed_iov_configure_vport_forced()
1803 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_configure_vport_forced()
1804 ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); in qed_iov_configure_vport_forced()
1806 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_configure_vport_forced()
1813 if (p_vf->p_vf_info.is_trusted_configured) in qed_iov_configure_vport_forced()
1814 p_vf->configured_features |= in qed_iov_configure_vport_forced()
1817 p_vf->configured_features |= in qed_iov_configure_vport_forced()
1830 filter.vport_to_add_to = p_vf->vport_id; in qed_iov_configure_vport_forced()
1831 filter.vlan = p_vf->bulletin.p_virt->pvid; in qed_iov_configure_vport_forced()
1836 rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, in qed_iov_configure_vport_forced()
1846 vport_update.opaque_fid = p_vf->opaque_fid; in qed_iov_configure_vport_forced()
1847 vport_update.vport_id = p_vf->vport_id; in qed_iov_configure_vport_forced()
1855 : p_vf->shadow_config.inner_vlan_removal; in qed_iov_configure_vport_forced()
1869 struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; in qed_iov_configure_vport_forced()
1891 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; in qed_iov_configure_vport_forced()
1893 p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); in qed_iov_configure_vport_forced()
1900 qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); in qed_iov_configure_vport_forced()
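qed_iov_configure_vport_forced() (lines 1785-1900) applies settings the PF forced through the bulletin. Nothing happens without a live vport instance (line 1790); a forced-MAC event installs the bulletin MAC as a unicast filter, and configured_features records what is in force, using a different bit for a trusted VF than for a truly forced MAC (lines 1813-1817). The forced-VLAN event likewise sets or clears its bit depending on whether a PVID is published (lines 1891-1893). A sketch of the event-to-bitmask pattern; the bit names are illustrative:

```c
#include <stdbool.h>
#include <stdint.h>

enum {
	MAC_ADDR_FORCED_SKETCH   = 0,
	VLAN_ADDR_FORCED_SKETCH  = 1,
	VFPF_BULLETIN_MAC_SKETCH = 2,  /* trusted-VF MAC, not a forced one */
};

struct vport_sketch {
	bool     vport_instance;
	bool     trusted;
	uint64_t configured_features;
};

static int configure_forced_sketch(struct vport_sketch *vf, uint64_t events)
{
	if (!vf->vport_instance)
		return 0;  /* no vport yet: nothing to configure (line 1790) */

	if (events & (1ULL << MAC_ADDR_FORCED_SKETCH)) {
		/* ... install the bulletin MAC as a unicast filter ... */
		vf->configured_features |= vf->trusted ?
			1ULL << VFPF_BULLETIN_MAC_SKETCH :
			1ULL << MAC_ADDR_FORCED_SKETCH;
	}

	if (events & (1ULL << VLAN_ADDR_FORCED_SKETCH)) {
		/* ... install the PVID filter and update the vport; the bit
		 * is set or cleared depending on whether a PVID is published */
	}

	return 0;
}

int main(void)
{
	struct vport_sketch vf = { .vport_instance = true };

	return configure_forced_sketch(&vf, 1ULL << MAC_ADDR_FORCED_SKETCH);
}
```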
2065 struct qed_vf_info *p_vf, bool b_is_tx) in qed_iov_vf_mbx_qid() argument
2067 struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_qid()
2071 if (!(p_vf->acquire.vfdev_info.capabilities & in qed_iov_vf_mbx_qid()
2085 p_vf->relative_vf_id); in qed_iov_vf_mbx_qid()
2093 p_vf->relative_vf_id, p_qid_tlv->qid); in qed_iov_vf_mbx_qid()
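qed_iov_vf_mbx_qid() (lines 2065-2093) fetches the optional queue-id TLV from a mailbox request, but only for VFs that advertised the queue-qids capability at ACQUIRE time (line 2071); legacy VFs get a default index. A sketch of the gate, with illustrative constants:

```c
#include <stdint.h>

#define CAP_QUEUE_QIDS_SKETCH   (1u << 0)
#define IOV_LEGACY_QID_SKETCH   0u     /* default index for pre-qid VFs */
#define IOV_QID_INVALID_SKETCH  0xffu

/* Return the qid the VF addressed, the legacy default, or invalid. */
static uint8_t mbx_qid_sketch(uint32_t acquire_caps, const uint8_t *qid_tlv)
{
	if (!(acquire_caps & CAP_QUEUE_QIDS_SKETCH))
		return IOV_LEGACY_QID_SKETCH;

	if (!qid_tlv)           /* capability set but TLV missing: malformed */
		return IOV_QID_INVALID_SKETCH;

	return *qid_tlv;
}

int main(void)
{
	uint8_t qid = 3;

	return mbx_qid_sketch(CAP_QUEUE_QIDS_SKETCH, &qid) == 3 ? 0 : 1;
}
```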
2314 struct qed_vf_info *p_vf) in qed_iov_vf_mbx_update_tunn_param() argument
2317 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_update_tunn_param()
2393 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); in qed_iov_vf_mbx_update_tunn_param()
2398 struct qed_vf_info *p_vf, in qed_iov_vf_mbx_start_txq_resp() argument
2401 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_mbx_start_txq_resp()
2412 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == in qed_iov_vf_mbx_start_txq_resp()
2430 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); in qed_iov_vf_mbx_start_txq_resp()
2778 struct qed_vf_info *p_vf, in qed_iov_vp_update_vlan_param() argument
2789 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; in qed_iov_vp_update_vlan_param()
2792 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { in qed_iov_vp_update_vlan_param()
3111 struct qed_vf_info *p_vf, in qed_iov_vf_update_vlan_shadow() argument
3119 if (p_vf->shadow_config.vlans[i].used && in qed_iov_vf_update_vlan_shadow()
3120 p_vf->shadow_config.vlans[i].vid == in qed_iov_vf_update_vlan_shadow()
3122 p_vf->shadow_config.vlans[i].used = false; in qed_iov_vf_update_vlan_shadow()
3129 p_vf->relative_vf_id); in qed_iov_vf_update_vlan_shadow()
3135 p_vf->shadow_config.vlans[i].used = false; in qed_iov_vf_update_vlan_shadow()
3141 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) in qed_iov_vf_update_vlan_shadow()
3147 if (p_vf->shadow_config.vlans[i].used) in qed_iov_vf_update_vlan_shadow()
3150 p_vf->shadow_config.vlans[i].used = true; in qed_iov_vf_update_vlan_shadow()
3151 p_vf->shadow_config.vlans[i].vid = p_params->vlan; in qed_iov_vf_update_vlan_shadow()
3159 p_vf->relative_vf_id, in qed_iov_vf_update_vlan_shadow()
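qed_iov_vf_update_vlan_shadow() (lines 3111-3159) maintains a per-VF shadow table of configured VLANs so they can be replayed later (see qed_iov_reconfigure_unicast_vlan() at lines 1736-1763): a remove clears the matching used slot (lines 3119-3122), and an add is refused while a forced VLAN is published (line 3141), otherwise it takes the first free slot (lines 3147-3151). A standalone sketch:

```c
#include <stdbool.h>
#include <stdint.h>

#define SHADOW_VLANS_SKETCH 8

struct vlan_shadow_sketch {
	struct { bool used; uint16_t vid; } vlans[SHADOW_VLANS_SKETCH];
	bool forced_vlan_published;  /* bulletin's VLAN_ADDR_FORCED bit */
};

/* Removal clears the matching slot, as at lines 3119-3122. */
static void shadow_vlan_remove_sketch(struct vlan_shadow_sketch *s, uint16_t vid)
{
	for (int i = 0; i < SHADOW_VLANS_SKETCH; i++)
		if (s->vlans[i].used && s->vlans[i].vid == vid)
			s->vlans[i].used = false;
}

/* Addition is refused under a forced VLAN (line 3141), else first free slot. */
static int shadow_vlan_add_sketch(struct vlan_shadow_sketch *s, uint16_t vid)
{
	if (s->forced_vlan_published)
		return -1;      /* PF-forced PVID wins */

	for (int i = 0; i < SHADOW_VLANS_SKETCH; i++) {
		if (s->vlans[i].used)
			continue;
		s->vlans[i].used = true;
		s->vlans[i].vid = vid;
		return 0;
	}
	return -1;              /* table full */
}

int main(void)
{
	struct vlan_shadow_sketch s = { 0 };

	if (shadow_vlan_add_sketch(&s, 100))
		return 1;
	shadow_vlan_remove_sketch(&s, 100);
	return s.vlans[0].used ? 1 : 0;
}
```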
3169 struct qed_vf_info *p_vf, in qed_iov_vf_update_mac_shadow() argument
3175 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) in qed_iov_vf_update_mac_shadow()
3179 if (p_vf->p_vf_info.is_trusted_configured) in qed_iov_vf_update_mac_shadow()
3185 if (ether_addr_equal(p_vf->shadow_config.macs[i], in qed_iov_vf_update_mac_shadow()
3187 eth_zero_addr(p_vf->shadow_config.macs[i]); in qed_iov_vf_update_mac_shadow()
3200 eth_zero_addr(p_vf->shadow_config.macs[i]); in qed_iov_vf_update_mac_shadow()
3209 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { in qed_iov_vf_update_mac_shadow()
3210 ether_addr_copy(p_vf->shadow_config.macs[i], in qed_iov_vf_update_mac_shadow()
3228 struct qed_vf_info *p_vf, in qed_iov_vf_update_unicast_shadow() argument
3234 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); in qed_iov_vf_update_unicast_shadow()
3240 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); in qed_iov_vf_update_unicast_shadow()
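The MAC shadow (lines 3169-3210) follows the same slot discipline, with the early-outs at lines 3175-3179: a PF-forced MAC blocks VF-initiated changes unless the VF is in the trusted configuration. The dispatcher at lines 3228-3240 then routes each request to the MAC shadow, the VLAN shadow, or both. A terse sketch of just that gate:

```c
#include <stdbool.h>

/* May this VF change its own MAC?  Per lines 3175-3179: not while a
 * forced MAC is published, unless the VF is in the trusted configuration. */
static bool vf_may_change_mac_sketch(bool forced_mac_published, bool trusted)
{
	return !forced_mac_published || trusted;
}

int main(void)
{
	return vf_may_change_mac_sketch(true, false) ? 1 : 0;
}
```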
3394 struct qed_vf_info *p_vf) in qed_iov_vf_mbx_release() argument
3400 qed_iov_vf_cleanup(p_hwfn, p_vf); in qed_iov_vf_mbx_release()
3402 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { in qed_iov_vf_mbx_release()
3404 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, in qed_iov_vf_mbx_release()
3405 p_vf->opaque_fid); in qed_iov_vf_mbx_release()
3413 p_vf->state = VF_STOPPED; in qed_iov_vf_mbx_release()
3416 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, in qed_iov_vf_mbx_release()
3422 struct qed_vf_info *p_vf) in qed_iov_vf_pf_get_coalesce() argument
3424 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_pf_get_coalesce()
3441 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, in qed_iov_vf_pf_get_coalesce()
3445 p_vf->abs_vf_id, qid); in qed_iov_vf_pf_get_coalesce()
3449 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); in qed_iov_vf_pf_get_coalesce()
3454 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, in qed_iov_vf_pf_get_coalesce()
3458 p_vf->abs_vf_id, qid); in qed_iov_vf_pf_get_coalesce()
3462 p_queue = &p_vf->vf_queues[qid]; in qed_iov_vf_pf_get_coalesce()
3486 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); in qed_iov_vf_pf_get_coalesce()
3573 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) in qed_iov_vf_flr_poll_dorq() argument
3578 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); in qed_iov_vf_flr_poll_dorq()
3591 p_vf->abs_vf_id, val); in qed_iov_vf_flr_poll_dorq()
3600 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) in qed_iov_vf_flr_poll_pbf() argument
3639 p_vf->abs_vf_id, i); in qed_iov_vf_flr_poll_pbf()
3647 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) in qed_iov_vf_flr_poll() argument
3651 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); in qed_iov_vf_flr_poll()
3655 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); in qed_iov_vf_flr_poll()
3667 struct qed_vf_info *p_vf; in qed_iov_execute_vf_flr_cleanup() local
3670 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); in qed_iov_execute_vf_flr_cleanup()
3671 if (!p_vf) in qed_iov_execute_vf_flr_cleanup()
3676 u16 vfid = p_vf->abs_vf_id; in qed_iov_execute_vf_flr_cleanup()
3681 qed_iov_vf_cleanup(p_hwfn, p_vf); in qed_iov_execute_vf_flr_cleanup()
3684 if (!p_vf->b_init) in qed_iov_execute_vf_flr_cleanup()
3687 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); in qed_iov_execute_vf_flr_cleanup()
3707 p_vf->state = VF_STOPPED; in qed_iov_execute_vf_flr_cleanup()
3709 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); in qed_iov_execute_vf_flr_cleanup()
3717 if (p_vf->state == VF_RESET) in qed_iov_execute_vf_flr_cleanup()
3718 p_vf->state = VF_STOPPED; in qed_iov_execute_vf_flr_cleanup()
3722 p_vf->vf_mbx.b_pending_msg = false; in qed_iov_execute_vf_flr_cleanup()
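The FLR path (lines 3573-3722) is strictly ordered: qed_iov_vf_flr_poll() drains the doorbell queue and then the PBF under a pretended FID (line 3578), and only then does qed_iov_execute_vf_flr_cleanup() reset software state, move the VF to VF_STOPPED, re-enable IGU access, and drop any stale mailbox request (line 3722). A rough ordering sketch, with stubbed poll helpers:

```c
#include <stdbool.h>

enum vf_state_sketch { VF_RESET_SKETCH, VF_STOPPED_SKETCH };

struct flr_vf_sketch {
	bool b_init;
	bool mbx_pending;
	enum vf_state_sketch state;
};

/* Stubbed poll helpers; the real ones watch DORQ/PBF usage counters
 * under a pretended FID and time out with an error. */
static int flr_poll_dorq_sketch(void) { return 0; }
static int flr_poll_pbf_sketch(void)  { return 0; }

static int flr_cleanup_sketch(struct flr_vf_sketch *vf)
{
	if (!vf->b_init)
		goto done;  /* never brought up: nothing to drain (line 3684) */

	if (flr_poll_dorq_sketch() || flr_poll_pbf_sketch())
		return -1;  /* hardware still draining */

done:
	if (vf->state == VF_RESET_SKETCH)
		vf->state = VF_STOPPED_SKETCH;  /* lines 3717-3718 */
	vf->mbx_pending = false;                /* line 3722 */
	return 0;
}

int main(void)
{
	struct flr_vf_sketch vf = { .b_init = true, .state = VF_RESET_SKETCH };

	return flr_cleanup_sketch(&vf);
}
```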
3768 struct qed_vf_info *p_vf; in qed_iov_mark_vf_flr() local
3771 p_vf = qed_iov_get_vf_info(p_hwfn, i, false); in qed_iov_mark_vf_flr()
3772 if (!p_vf) in qed_iov_mark_vf_flr()
3775 vfid = p_vf->abs_vf_id; in qed_iov_mark_vf_flr()
3778 u16 rel_vf_id = p_vf->relative_vf_id; in qed_iov_mark_vf_flr()
3784 p_vf->state = VF_RESET; in qed_iov_mark_vf_flr()
3805 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, in qed_iov_get_link() local
3810 if (!p_vf) in qed_iov_get_link()
3813 p_bulletin = p_vf->bulletin.p_virt; in qed_iov_get_link()
3826 struct qed_vf_info *p_vf) in qed_iov_vf_pf_bulletin_update_mac() argument
3828 struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; in qed_iov_vf_pf_bulletin_update_mac()
3829 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; in qed_iov_vf_pf_bulletin_update_mac()
3834 if (!p_vf->p_vf_info.is_trusted_configured) { in qed_iov_vf_pf_bulletin_update_mac()
3838 p_vf->abs_vf_id); in qed_iov_vf_pf_bulletin_update_mac()
3848 p_vf->abs_vf_id, p_req->mac); in qed_iov_vf_pf_bulletin_update_mac()
3851 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, in qed_iov_vf_pf_bulletin_update_mac()
3861 struct qed_vf_info *p_vf; in qed_iov_process_mbx_req() local
3863 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); in qed_iov_process_mbx_req()
3864 if (!p_vf) in qed_iov_process_mbx_req()
3867 mbx = &p_vf->vf_mbx; in qed_iov_process_mbx_req()
3873 p_vf->abs_vf_id); in qed_iov_process_mbx_req()
3882 p_vf->abs_vf_id, mbx->first_tlv.tl.type); in qed_iov_process_mbx_req()
3886 !p_vf->b_malicious) { in qed_iov_process_mbx_req()
3889 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3892 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3895 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3898 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3901 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3904 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3907 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3910 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3913 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3916 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3919 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3922 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3925 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3928 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3931 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3934 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3937 qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); in qed_iov_process_mbx_req()
3943 p_vf->abs_vf_id, mbx->first_tlv.tl.type); in qed_iov_process_mbx_req()
3945 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, in qed_iov_process_mbx_req()
3958 p_vf->abs_vf_id, in qed_iov_process_mbx_req()
3966 if (p_vf->acquire.first_tlv.reply_address && in qed_iov_process_mbx_req()
3968 p_vf->acquire.first_tlv.reply_address)) { in qed_iov_process_mbx_req()
3969 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, in qed_iov_process_mbx_req()
3977 p_vf->abs_vf_id); in qed_iov_process_mbx_req()
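qed_iov_process_mbx_req() (lines 3861-3977) is the central dispatcher: the first TLV's type selects a handler, but only for VFs not flagged malicious (line 3886), and an unrecognized or untrusted request is NACKed only when the reply address cached from ACQUIRE checks out (lines 3966-3969), so the PF never writes a response through an address it cannot vouch for. A reduced sketch of that control flow:

```c
#include <stdbool.h>

enum tlv_sketch {
	TLV_ACQUIRE_SKETCH = 1,
	TLV_VPORT_START_SKETCH,
	TLV_RELEASE_SKETCH,
	/* ... the real switch covers ~20 channel TLV types ... */
};

static void handle_acquire_sketch(void)     { /* ... */ }
static void handle_vport_start_sketch(void) { /* ... */ }
static void handle_release_sketch(void)     { /* ... */ }
static void nack_vf_sketch(void)            { /* ... */ }

static void process_mbx_sketch(enum tlv_sketch type, bool malicious,
			       bool reply_address_ok)
{
	if (!malicious) {
		switch (type) {
		case TLV_ACQUIRE_SKETCH:     handle_acquire_sketch();     return;
		case TLV_VPORT_START_SKETCH: handle_vport_start_sketch(); return;
		case TLV_RELEASE_SKETCH:     handle_release_sketch();     return;
		default:
			break;  /* unknown type: fall through to the NACK path */
		}
	}

	/* Reply only through a reply address we can vouch for. */
	if (reply_address_ok)
		nack_vf_sketch();
}

int main(void)
{
	process_mbx_sketch(TLV_ACQUIRE_SKETCH, false, true);
	return 0;
}
```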
3989 struct qed_vf_info *p_vf; in qed_iov_pf_get_pending_events() local
3991 p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; in qed_iov_pf_get_pending_events()
3992 if (p_vf->vf_mbx.b_pending_msg) in qed_iov_pf_get_pending_events()
4016 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, in qed_sriov_vfpf_msg() local
4019 if (!p_vf) in qed_sriov_vfpf_msg()
4025 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; in qed_sriov_vfpf_msg()
4028 p_vf->vf_mbx.b_pending_msg = true; in qed_sriov_vfpf_msg()
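qed_sriov_vfpf_msg() (lines 4016-4028) runs from event-queue completion: the 64-bit host address of the VF's request is reassembled from the hi/lo halves of the event data (line 4025) and the mailbox is flagged pending; processing happens later from the PF-side IOV handler, which scans for b_pending_msg (line 3992). The reassembly, verbatim in spirit:

```c
#include <stdint.h>

/* Rebuild the VF request's host address exactly as line 4025 does. */
static uint64_t pending_req_sketch(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	return pending_req_sketch(0x1, 0x2) == 0x100000002ULL ? 0 : 1;
}
```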
4037 struct qed_vf_info *p_vf; in qed_sriov_vfpf_malicious() local
4039 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); in qed_sriov_vfpf_malicious()
4041 if (!p_vf) in qed_sriov_vfpf_malicious()
4044 if (!p_vf->b_malicious) { in qed_sriov_vfpf_malicious()
4047 p_vf->abs_vf_id, p_data->err_id); in qed_sriov_vfpf_malicious()
4049 p_vf->b_malicious = true; in qed_sriov_vfpf_malicious()
4053 p_vf->abs_vf_id, p_data->err_id); in qed_sriov_vfpf_malicious()
4307 struct qed_vf_info *p_vf; in qed_iov_bulletin_get_mac() local
4309 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); in qed_iov_bulletin_get_mac()
4310 if (!p_vf || !p_vf->bulletin.p_virt) in qed_iov_bulletin_get_mac()
4313 if (!(p_vf->bulletin.p_virt->valid_bitmap & in qed_iov_bulletin_get_mac()
4317 return p_vf->bulletin.p_virt->mac; in qed_iov_bulletin_get_mac()
4323 struct qed_vf_info *p_vf; in qed_iov_bulletin_get_forced_mac() local
4325 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); in qed_iov_bulletin_get_forced_mac()
4326 if (!p_vf || !p_vf->bulletin.p_virt) in qed_iov_bulletin_get_forced_mac()
4329 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) in qed_iov_bulletin_get_forced_mac()
4332 return p_vf->bulletin.p_virt->mac; in qed_iov_bulletin_get_forced_mac()
4338 struct qed_vf_info *p_vf; in qed_iov_bulletin_get_forced_vlan() local
4340 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); in qed_iov_bulletin_get_forced_vlan()
4341 if (!p_vf || !p_vf->bulletin.p_virt) in qed_iov_bulletin_get_forced_vlan()
4344 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) in qed_iov_bulletin_get_forced_vlan()
4347 return p_vf->bulletin.p_virt->pvid; in qed_iov_bulletin_get_forced_vlan()
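The three bulletin getters at lines 4307-4347 share one contract: hand out a field only when the matching valid_bitmap bit says it was actually published, and NULL (or 0) otherwise. A closing sketch of that contract:

```c
#include <stddef.h>
#include <stdint.h>

enum { MAC_ADDR_FORCED_SKETCH = 0, VLAN_ADDR_FORCED_SKETCH = 1 };

struct bulletin_view_sketch {
	uint64_t valid_bitmap;
	uint8_t  mac[6];
	uint16_t pvid;
};

static const uint8_t *
get_forced_mac_sketch(const struct bulletin_view_sketch *b)
{
	if (!b || !(b->valid_bitmap & (1ULL << MAC_ADDR_FORCED_SKETCH)))
		return NULL;    /* no forced MAC published for this VF */
	return b->mac;
}

static uint16_t get_forced_vlan_sketch(const struct bulletin_view_sketch *b)
{
	if (!b || !(b->valid_bitmap & (1ULL << VLAN_ADDR_FORCED_SKETCH)))
		return 0;       /* 0 means "no forced PVID" */
	return b->pvid;
}

int main(void)
{
	struct bulletin_view_sketch b = { 0 };

	return (!get_forced_mac_sketch(&b) && !get_forced_vlan_sketch(&b)) ? 0 : 1;
}
```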