| /Linux-v5.15/drivers/net/ethernet/intel/ice/ |
| D | ice_lib.c |
      42  static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)  in ice_vsi_ctrl_all_rx_rings() argument
      47  for (i = 0; i < vsi->num_rxq; i++)  in ice_vsi_ctrl_all_rx_rings()
      48  ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);  in ice_vsi_ctrl_all_rx_rings()
      50  ice_flush(&vsi->back->hw);  in ice_vsi_ctrl_all_rx_rings()
      52  for (i = 0; i < vsi->num_rxq; i++) {  in ice_vsi_ctrl_all_rx_rings()
      53  ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);  in ice_vsi_ctrl_all_rx_rings()
      68  static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)  in ice_vsi_alloc_arrays() argument
      70  struct ice_pf *pf = vsi->back;  in ice_vsi_alloc_arrays()
      76  vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,  in ice_vsi_alloc_arrays()
      77  sizeof(*vsi->tx_rings), GFP_KERNEL);  in ice_vsi_alloc_arrays()
      [all …]
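The ice_vsi_ctrl_all_rx_rings() hits above show a two-pass pattern: kick every Rx ring without waiting, flush the posted register writes, then poll each ring for completion. Below is a minimal sketch of that shape, not the driver's actual implementation; it assumes the ice driver headers are on the include path, the helper signatures listed under ice_base.h further down, and the wrapper name example_ctrl_all_rx_rings() is invented for illustration.

/*
 * Hypothetical illustration of the trigger-then-wait shape seen in
 * ice_vsi_ctrl_all_rx_rings().  Assumes ice_vsi_ctrl_one_rx_ring() and
 * ice_vsi_wait_one_rx_ring() behave as their names and the ice_base.h
 * prototypes below suggest.
 */
static int example_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	/* Kick off the enable/disable for every Rx ring without waiting. */
	for (i = 0; i < vsi->num_rxq; i++)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

	/* Post the queued register writes to hardware. */
	ice_flush(&vsi->back->hw);

	/* Only now poll each ring until it reaches the requested state. */
	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}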
|
| D | ice_fltr.c |
      55  ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)  in ice_fltr_add_mac_list() argument
      57  return ice_add_mac(&vsi->back->hw, list);  in ice_fltr_add_mac_list()
      66  ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)  in ice_fltr_remove_mac_list() argument
      68  return ice_remove_mac(&vsi->back->hw, list);  in ice_fltr_remove_mac_list()
      77  ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)  in ice_fltr_add_vlan_list() argument
      79  return ice_add_vlan(&vsi->back->hw, list);  in ice_fltr_add_vlan_list()
      88  ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)  in ice_fltr_remove_vlan_list() argument
      90  return ice_remove_vlan(&vsi->back->hw, list);  in ice_fltr_remove_vlan_list()
      99  ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)  in ice_fltr_add_eth_list() argument
      101 return ice_add_eth_mac(&vsi->back->hw, list);  in ice_fltr_add_eth_list()
      [all …]
|
| D | ice_lib.h |
      13  void ice_update_eth_stats(struct ice_vsi *vsi);
      15  int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
      17  int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
      19  int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
      21  int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
      23  void ice_vsi_cfg_msix(struct ice_vsi *vsi);
      26  ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
      28  int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
      30  int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
      32  int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
      [all …]
|
| D | ice_arfs.c |
      10  static bool ice_is_arfs_active(struct ice_vsi *vsi)  in ice_is_arfs_active() argument
      12  return !!vsi->arfs_fltr_list;  in ice_is_arfs_active()
      28  struct ice_vsi *vsi;  in ice_is_arfs_using_perfect_flow() local
      30  vsi = ice_get_main_vsi(pf);  in ice_is_arfs_using_perfect_flow()
      31  if (!vsi)  in ice_is_arfs_using_perfect_flow()
      34  arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;  in ice_is_arfs_using_perfect_flow()
      59  ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,  in ice_arfs_update_active_fltr_cntrs() argument
      62  struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;  in ice_arfs_update_active_fltr_cntrs()
      90  …dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\…  in ice_arfs_update_active_fltr_cntrs()
      105 ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)  in ice_arfs_del_flow_rules() argument
      [all …]
|
| D | ice_xsk.c |
      20  static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)  in ice_qp_reset_stats() argument
      22  memset(&vsi->rx_rings[q_idx]->rx_stats, 0,  in ice_qp_reset_stats()
      23  sizeof(vsi->rx_rings[q_idx]->rx_stats));  in ice_qp_reset_stats()
      24  memset(&vsi->tx_rings[q_idx]->stats, 0,  in ice_qp_reset_stats()
      25  sizeof(vsi->tx_rings[q_idx]->stats));  in ice_qp_reset_stats()
      26  if (ice_is_xdp_ena_vsi(vsi))  in ice_qp_reset_stats()
      27  memset(&vsi->xdp_rings[q_idx]->stats, 0,  in ice_qp_reset_stats()
      28  sizeof(vsi->xdp_rings[q_idx]->stats));  in ice_qp_reset_stats()
      36  static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)  in ice_qp_clean_rings() argument
      38  ice_clean_tx_ring(vsi->tx_rings[q_idx]);  in ice_qp_clean_rings()
      [all …]
|
| D | ice_base.c |
      101 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)  in ice_vsi_alloc_q_vector() argument
      103 struct ice_pf *pf = vsi->back;  in ice_vsi_alloc_q_vector()
      112 q_vector->vsi = vsi;  in ice_vsi_alloc_q_vector()
      119 if (vsi->type == ICE_VSI_VF)  in ice_vsi_alloc_q_vector()
      129 if (vsi->netdev)  in ice_vsi_alloc_q_vector()
      130 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,  in ice_vsi_alloc_q_vector()
      135 vsi->q_vectors[v_idx] = q_vector;  in ice_vsi_alloc_q_vector()
      145 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)  in ice_free_q_vector() argument
      148 struct ice_pf *pf = vsi->back;  in ice_free_q_vector()
      153 if (!vsi->q_vectors[v_idx]) {  in ice_free_q_vector()
      [all …]
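ice_base.c allocates one q_vector per MSI-X vector and stores it in vsi->q_vectors[]. A hedged sketch of how a caller might drive the per-vector allocator for every vector follows; the wrapper name example_alloc_all_q_vectors() is invented, and the cleanup call relies on ice_vsi_free_q_vectors() as declared in ice_base.h below.

/*
 * Illustrative only: a loop a caller might wrap around the per-vector
 * allocator shown above.  Assumes ice_vsi_alloc_q_vector() returns 0 on
 * success and a negative errno on failure, and that ice_vsi_free_q_vectors()
 * undoes any partial allocation.
 */
static int example_alloc_all_q_vectors(struct ice_vsi *vsi)
{
	u16 v_idx;
	int err;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	/* Unwind whatever was allocated before the failure. */
	ice_vsi_free_q_vectors(vsi);
	return err;
}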
|
| D | ice_main.c |
      49  static int ice_vsi_open(struct ice_vsi *vsi);
      83  struct ice_vsi *vsi = NULL;  in ice_check_for_hang_subtask() local
      90  if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {  in ice_check_for_hang_subtask()
      91  vsi = pf->vsi[v];  in ice_check_for_hang_subtask()
      95  if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))  in ice_check_for_hang_subtask()
      98  if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))  in ice_check_for_hang_subtask()
      101 hw = &vsi->back->hw;  in ice_check_for_hang_subtask()
      103 for (i = 0; i < vsi->num_txq; i++) {  in ice_check_for_hang_subtask()
      104 struct ice_ring *tx_ring = vsi->tx_rings[i];  in ice_check_for_hang_subtask()
      142 struct ice_vsi *vsi;  in ice_init_mac_fltr() local
      [all …]
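ice_check_for_hang_subtask() scans pf->vsi[] for the PF-type VSI before inspecting its Tx rings. The sketch below isolates that lookup; the pf->num_alloc_vsi loop bound and the helper name example_get_pf_vsi() are assumptions made for illustration, while the null check and the ICE_VSI_PF type test come straight from the hits above.

/*
 * Hypothetical helper mirroring the "find the PF-type VSI" scan shown in
 * ice_check_for_hang_subtask().  The array bound is an assumption.
 */
static struct ice_vsi *example_get_pf_vsi(struct ice_pf *pf)
{
	unsigned int v;

	for (v = 0; v < pf->num_alloc_vsi; v++)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF)
			return pf->vsi[v];

	return NULL;
}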
|
| D | ice_ethtool.c |
      34  return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *  in ice_q_stats_len()
      176 struct ice_vsi *vsi = np->vsi;  in ice_get_drvinfo() local
      177 struct ice_pf *pf = vsi->back;  in ice_get_drvinfo()
      208 struct ice_pf *pf = np->vsi->back;  in ice_get_regs()
      222 struct ice_pf *pf = np->vsi->back;  in ice_get_msglevel()
      236 struct ice_pf *pf = np->vsi->back;  in ice_set_msglevel()
      251 struct ice_pf *pf = np->vsi->back;  in ice_get_eeprom_len()
      261 struct ice_vsi *vsi = np->vsi;  in ice_get_eeprom() local
      262 struct ice_pf *pf = vsi->back;  in ice_get_eeprom()
      340 status = ice_get_link_status(np->vsi->port_info, &link_up);  in ice_link_test()
      [all …]
|
| D | ice.h |
      126 #define ice_for_each_txq(vsi, i) \  argument
      127 for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
      129 #define ice_for_each_rxq(vsi, i) \  argument
      130 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
      133 #define ice_for_each_alloc_txq(vsi, i) \  argument
      134 for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
      136 #define ice_for_each_alloc_rxq(vsi, i) \  argument
      137 for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
      139 #define ice_for_each_q_vector(vsi, i) \  argument
      140 for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
      [all …]
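These iterator macros are thin wrappers around plain for loops over the VSI's queue and vector counts. A short usage sketch follows; it assumes only what the other hits in this listing show, namely that vsi->tx_rings[] holds one ring pointer per Tx queue, and the counting helper itself is invented for illustration.

/* Usage sketch for ice_for_each_txq(): walk every Tx queue of a VSI. */
static unsigned int example_count_allocated_txq(struct ice_vsi *vsi)
{
	unsigned int count = 0;
	int i;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i])	/* skip slots that were never allocated */
			count++;

	return count;
}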
|
| D | ice_fltr.h |
      9   ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
      12  ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
      15  ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
      18  ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
      20  ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
      23  ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
      26  ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
      29  ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
      33  ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
      36  ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
      [all …]
|
| D | ice_idc.c |
      57  if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)  in ice_find_vsi()
      58  return pf->vsi[i];  in ice_find_vsi()
      70  struct ice_vsi *vsi;  in ice_add_rdma_qset() local
      85  vsi = ice_get_main_vsi(pf);  in ice_add_rdma_qset()
      86  if (!vsi) {  in ice_add_rdma_qset()
      97  status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,  in ice_add_rdma_qset()
      104 status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,  in ice_add_rdma_qset()
      110 vsi->qset_handle[qset->tc] = qset->qs_handle;  in ice_add_rdma_qset()
      124 struct ice_vsi *vsi;  in ice_del_rdma_qset() local
      131 vsi = ice_find_vsi(pf, qset->vport_id);  in ice_del_rdma_qset()
      [all …]
|
| D | ice_virtchnl_pf.c |
      256 return vf->pf->vsi[vf->lan_vsi_idx];  in ice_get_vf_vsi()
      469 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);  in ice_vf_ctrl_vsi_release()
      520 struct ice_vsi *vsi;  in ice_dis_vf_mappings() local
      526 vsi = ice_get_vf_vsi(vf);  in ice_dis_vf_mappings()
      544 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)  in ice_dis_vf_mappings()
      549 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)  in ice_dis_vf_mappings()
      601 struct ice_vsi *vsi = ice_get_vf_vsi(vf);  in ice_dis_vf_qs() local
      603 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);  in ice_dis_vf_qs()
      604 ice_vsi_stop_all_rx_rings(vsi);  in ice_dis_vf_qs()
      763 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)  in ice_vsi_manage_pvid() argument
      [all …]
|
| D | ice_base.h |
      12  ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
      13  int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
      14  int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
      15  void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
      16  void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
      18  ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
      22  ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
      24  ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
      27  ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
      31  ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
|
| D | ice_dcb_lib.c |
      12  void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)  in ice_vsi_cfg_netdev_tc() argument
      14  struct net_device *netdev = vsi->netdev;  in ice_vsi_cfg_netdev_tc()
      15  struct ice_pf *pf = vsi->back;  in ice_vsi_cfg_netdev_tc()
      28  if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))  in ice_vsi_cfg_netdev_tc()
      34  if (vsi->tc_cfg.ena_tc & BIT(i))  in ice_vsi_cfg_netdev_tc()
      36  vsi->tc_cfg.tc_info[i].netdev_tc,  in ice_vsi_cfg_netdev_tc()
      37  vsi->tc_cfg.tc_info[i].qcount_tx,  in ice_vsi_cfg_netdev_tc()
      38  vsi->tc_cfg.tc_info[i].qoffset);  in ice_vsi_cfg_netdev_tc()
      44  netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;  in ice_vsi_cfg_netdev_tc()
      76  struct ice_vsi *vsi;  in ice_is_pfc_causing_hung_q() local
      [all …]
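ice_vsi_cfg_netdev_tc() maps the VSI's traffic-class layout onto the netdev with the core netdev_set_num_tc()/netdev_set_tc_queue() helpers. The sketch below reconstructs only that mapping step; the ICE_MAX_TRAFFIC_CLASS bound is assumed here, and the per-TC qcount_tx/qoffset fields are taken from the vsi->tc_cfg accesses shown above.

/*
 * Sketch of the netdev traffic-class mapping step, under the stated
 * assumptions; not the driver's full ice_vsi_cfg_netdev_tc().
 */
static int example_cfg_netdev_tc(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	if (!netdev)
		return -EINVAL;

	/* Tell the stack how many TCs exist before describing each one. */
	if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
		return -EINVAL;

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)	/* bound assumed */
		if (vsi->tc_cfg.ena_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_cfg.tc_info[i].netdev_tc,
					    vsi->tc_cfg.tc_info[i].qcount_tx,
					    vsi->tc_cfg.tc_info[i].qoffset);

	return 0;
}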
|
| D | ice_arfs.h |
      44  void ice_clear_arfs(struct ice_vsi *vsi);
      45  void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
      46  void ice_init_arfs(struct ice_vsi *vsi);
      48  int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
      55  static inline void ice_clear_arfs(struct ice_vsi *vsi) { }  in ice_clear_arfs() argument
      56  static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }  in ice_free_cpu_rx_rmap() argument
      57  static inline void ice_init_arfs(struct ice_vsi *vsi) { }  in ice_init_arfs() argument
      62  static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi)  in ice_set_cpu_rx_rmap() argument
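ice_arfs.h shows the usual "compile the feature out" idiom: real prototypes in one branch, empty static inline stubs in the other, so callers never need their own #ifdefs. A generic sketch of the pattern follows; the CONFIG_RFS_ACCEL gate and the stub's success return value are assumptions, since neither is visible in this listing.

/* Generic shape of the stub pattern above; the config symbol is assumed. */
#ifdef CONFIG_RFS_ACCEL
void ice_init_arfs(struct ice_vsi *vsi);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
#else
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi)
{
	return 0;	/* assumed: report success when the feature is absent */
}
#endif /* CONFIG_RFS_ACCEL */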
|
| /Linux-v5.15/drivers/infiniband/hw/irdma/ |
| D | ws.c |
      19  static struct irdma_ws_node *irdma_alloc_node(struct irdma_sc_vsi *vsi,  in irdma_alloc_node() argument
      34  node_index = irdma_alloc_ws_node_id(vsi->dev);  in irdma_alloc_node()
      43  node->vsi_index = vsi->vsi_idx;  in irdma_alloc_node()
      47  node->traffic_class = vsi->qos[user_pri].traffic_class;  in irdma_alloc_node()
      49  node->rel_bw = vsi->qos[user_pri].rel_bw;  in irdma_alloc_node()
      53  node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;  in irdma_alloc_node()
      71  static void irdma_free_node(struct irdma_sc_vsi *vsi,  in irdma_free_node() argument
      77  irdma_free_ws_node_id(vsi->dev, node->index);  in irdma_free_node()
      91  irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)  in irdma_ws_cqp_cmd() argument
      96  node_info.vsi = node->vsi_index;  in irdma_ws_cqp_cmd()
      [all …]
|
| D | main.c |
      45  iwdev->vsi.tc_change_pending = true;  in irdma_prep_tc_change()
      46  irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);  in irdma_prep_tc_change()
      50  !atomic_read(&iwdev->vsi.qp_suspend_reqs),  in irdma_prep_tc_change()
      52  irdma_ws_reset(&iwdev->vsi);  in irdma_prep_tc_change()
      91  if (iwdev->vsi.mtu != iwdev->netdev->mtu) {  in irdma_iidc_event_handler()
      95  irdma_change_l2params(&iwdev->vsi, &l2params);  in irdma_iidc_event_handler()
      98  if (iwdev->vsi.tc_change_pending)  in irdma_iidc_event_handler()
      105 if (!iwdev->vsi.tc_change_pending)  in irdma_iidc_event_handler()
      113 irdma_change_l2params(&iwdev->vsi, &l2params);  in irdma_iidc_event_handler()
      160 static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi,  in irdma_lan_register_qset() argument
      [all …]
|
| /Linux-v5.15/drivers/media/platform/mtk-vcodec/vdec/ |
| D | vdec_vp9_if.c |
      201 struct vdec_vp9_vsi *vsi;  member
      209 struct vdec_vp9_vsi *vsi = inst->vsi;  in vp9_is_sf_ref_fb() local
      211 for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {  in vp9_is_sf_ref_fb()
      212 if (fb == &vsi->sf_ref_fb[i].fb)  in vp9_is_sf_ref_fb()
      264 struct vdec_vp9_vsi *vsi = inst->vsi;  in vp9_ref_cnt_fb() local
      267 if (ref_idx >= 0 && vsi->frm_bufs[ref_idx].ref_cnt > 0) {  in vp9_ref_cnt_fb()
      268 vsi->frm_bufs[ref_idx].ref_cnt--;  in vp9_ref_cnt_fb()
      270 if (vsi->frm_bufs[ref_idx].ref_cnt == 0) {  in vp9_ref_cnt_fb()
      272 vsi->frm_bufs[ref_idx].buf.fb)) {  in vp9_ref_cnt_fb()
      276 vsi->frm_bufs[ref_idx].buf.fb->base_y.va);  in vp9_ref_cnt_fb()
      [all …]
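vp9_ref_cnt_fb() above drops a reference on a frame-buffer slot in the shared vsi and recycles the buffer once the count reaches zero. The sketch below strips that release pattern down to hypothetical types; the real vsi->frm_bufs[] entries carry more state than shown here, so this is only the general shape, not the decoder's code.

/* Hypothetical, simplified slot: just a refcount and a buffer pointer. */
struct example_frm_buf {
	int ref_cnt;
	void *buf;
};

/* Drop one reference; hand the buffer back via the callback at zero. */
static void example_ref_cnt_put(struct example_frm_buf *fb,
				void (*release)(void *buf))
{
	if (!fb || fb->ref_cnt <= 0)
		return;

	if (--fb->ref_cnt == 0 && fb->buf) {
		release(fb->buf);
		fb->buf = NULL;
	}
}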
|
| D | vdec_h264_if.c |
      132 struct vdec_h264_vsi *vsi;  member
      151 inst->vsi->pred_buf_dma = inst->pred_buf.dma_addr;  in allocate_predication_buf()
      161 inst->vsi->pred_buf_dma = 0;  in free_predication_buf()
      184 inst->vsi->mv_buf_dma[i] = mem->dma_addr;  in alloc_mv_buf()
      196 inst->vsi->mv_buf_dma[i] = 0;  in free_mv_buf()
      207 list = disp_list ? &inst->vsi->list_disp : &inst->vsi->list_free;  in check_list_validity()
      229 list = &inst->vsi->list_free;  in put_fb_to_free()
      248 *pic = inst->vsi->pic;  in get_pic_info()
      257 cr->left = inst->vsi->crop.left;  in get_crop_info()
      258 cr->top = inst->vsi->crop.top;  in get_crop_info()
      [all …]
|
| /Linux-v5.15/drivers/net/ethernet/intel/i40e/ |
| D | i40e_main.c |
      31  static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
      33  static int i40e_add_vsi(struct i40e_vsi *vsi);
      34  static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
      270 if (pf->vsi[i] && (pf->vsi[i]->id == id))  in i40e_find_vsi_from_id()
      271 return pf->vsi[i];  in i40e_find_vsi_from_id()
      302 struct i40e_vsi *vsi = np->vsi;  in i40e_tx_timeout() local
      303 struct i40e_pf *pf = vsi->back;  in i40e_tx_timeout()
      311 for (i = 0; i < vsi->num_queue_pairs; i++) {  in i40e_tx_timeout()
      312 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {  in i40e_tx_timeout()
      314 vsi->tx_rings[i]->queue_index) {  in i40e_tx_timeout()
      [all …]
|
| D | i40e_debugfs.c |
      32  if (pf->vsi[i] && (pf->vsi[i]->seid == seid))  in i40e_dbg_find_vsi()
      33  return pf->vsi[i];  in i40e_dbg_find_vsi()
      89  pf->vsi[pf->lan_vsi]->netdev->name,  in i40e_dbg_command_read()
      119 struct i40e_vsi *vsi;  in i40e_dbg_dump_vsi_seid() local
      122 vsi = i40e_dbg_find_vsi(pf, seid);  in i40e_dbg_dump_vsi_seid()
      123 if (!vsi) {  in i40e_dbg_dump_vsi_seid()
      129 if (vsi->netdev) {  in i40e_dbg_dump_vsi_seid()
      130 struct net_device *nd = vsi->netdev;  in i40e_dbg_dump_vsi_seid()
      143 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);  in i40e_dbg_dump_vsi_seid()
      147 i, vsi->state[i]);  in i40e_dbg_dump_vsi_seid()
      [all …]
|
| D | i40e_virtchnl_pf.c |
      229 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);  in i40e_vc_isvalid_vsi_id() local
      231 return (vsi && (vsi->vf_id == vf->vf_id));  in i40e_vc_isvalid_vsi_id()
      246 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);  in i40e_vc_isvalid_queue_id() local
      248 return (vsi && (qid < vsi->alloc_queue_pairs));  in i40e_vc_isvalid_queue_id()
      279 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);  in i40e_vc_get_pf_queue_id() local
      282 if (!vsi)  in i40e_vc_get_pf_queue_id()
      285 if (le16_to_cpu(vsi->info.mapping_flags) &  in i40e_vc_get_pf_queue_id()
      288 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);  in i40e_vc_get_pf_queue_id()
      290 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +  in i40e_vc_get_pf_queue_id()
      603 struct i40e_vsi *vsi;  in i40e_config_vsi_tx_queue() local
      [all …]
|
| D | i40e_ethtool.c |
      1061 struct i40e_pf *pf = np->vsi->back;  in i40e_get_link_ksettings()
      1153 struct i40e_pf *pf = np->vsi->back;  in i40e_set_link_ksettings()
      1154 struct i40e_vsi *vsi = np->vsi;  in i40e_set_link_ksettings() local
      1169 if (vsi != pf->vsi[pf->lan_vsi])  in i40e_set_link_ksettings()
      1351 i40e_print_link_message(vsi, false);  in i40e_set_link_ksettings()
      1388 struct i40e_pf *pf = np->vsi->back;  in i40e_set_fec_cfg()
      1450 struct i40e_pf *pf = np->vsi->back;  in i40e_get_fec_param()
      1492 struct i40e_pf *pf = np->vsi->back;  in i40e_set_fec_param()
      1536 struct i40e_pf *pf = np->vsi->back;  in i40e_nway_reset()
      1563 struct i40e_pf *pf = np->vsi->back;  in i40e_get_pauseparam()
      [all …]
|
| D | i40e.h |
      612  struct i40e_vsi **vsi;  member
      947  struct i40e_vsi *vsi;  member
      954  struct i40e_vsi *vsi;  member
      1032 struct i40e_vsi *vsi = np->vsi;  in i40e_netdev_to_pf() local
      1034 return vsi->back;  in i40e_netdev_to_pf()
      1037 static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,  in i40e_vsi_setup_irqhandler() argument
      1040 vsi->irq_handler = irq_handler;  in i40e_vsi_setup_irqhandler()
      1090 int i40e_up(struct i40e_vsi *vsi);
      1091 void i40e_down(struct i40e_vsi *vsi);
      1095 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
      [all …]
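The i40e_netdev_to_pf() hit shows the usual netdev -> driver private area -> VSI -> PF back-pointer chain used throughout the ethtool and main.c hits (np->vsi->back). A sketch of that chain follows; it assumes the private area behind netdev_priv() is a struct i40e_netdev_priv whose ->vsi member points at the owning VSI, which this listing implies but does not spell out.

/*
 * Sketch of the back-pointer chain in i40e_netdev_to_pf() above, under the
 * stated assumption about the netdev's private structure.
 */
static inline struct i40e_pf *example_netdev_to_pf(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	return vsi->back;
}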
|
| D | i40e_xsk.c |
      41  static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,  in i40e_xsk_pool_enable() argument
      45  struct net_device *netdev = vsi->netdev;  in i40e_xsk_pool_enable()
      49  if (vsi->type != I40E_VSI_MAIN)  in i40e_xsk_pool_enable()
      52  if (qid >= vsi->num_queue_pairs)  in i40e_xsk_pool_enable()
      59  err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);  in i40e_xsk_pool_enable()
      63  set_bit(qid, vsi->af_xdp_zc_qps);  in i40e_xsk_pool_enable()
      65  if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);  in i40e_xsk_pool_enable()
      68  err = i40e_queue_pair_disable(vsi, qid);  in i40e_xsk_pool_enable()
      72  err = i40e_queue_pair_enable(vsi, qid);  in i40e_xsk_pool_enable()
      77  err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);  in i40e_xsk_pool_enable()
      [all …]
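i40e_xsk_pool_enable() brackets the AF_XDP pool attach with a quiesce/reconfigure/re-enable sequence when the interface is running. The sketch below isolates that bracket; the configure callback stands in for the DMA-map and qid bookkeeping steps, the wrapper name is invented, and the helpers are used only with the argument shapes visible in the hits above, so treat it as an outline rather than the driver's flow.

/*
 * Hypothetical outline of the quiesce/reconfigure/re-enable bracket seen in
 * i40e_xsk_pool_enable(), under the stated assumptions.
 */
static int example_reconfigure_queue_pair(struct i40e_vsi *vsi, u16 qid,
					  int (*configure)(struct i40e_vsi *vsi,
							   u16 qid))
{
	bool if_running;
	int err;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	/* Quiesce the queue pair before touching its buffer setup. */
	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	err = configure(vsi, qid);

	if (if_running) {
		int err2 = i40e_queue_pair_enable(vsi, qid);

		if (!err)
			err = err2;
		if (!err)
			/* Nudge the zero-copy Rx path so it starts refilling. */
			err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
	}

	return err;
}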
|