Lines Matching +full:0 +full:xc03

56  * Last entry must be all 0s
62 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
63 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
64 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
65 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
66 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
89 {0, }
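
The all-zero entry at line 89 is the terminator announced by the comment at line 56: the PCI core walks the table until it hits an entry whose fields are all zero. A minimal sketch of the surrounding pattern (the two IDs shown are from the listing; the full table appears above):

    static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
        /* ... remaining device IDs ... */
        {0, }   /* sentinel: pci_match_id() stops scanning here */
    };
    MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
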
95 module_param(debug, uint, 0);
96 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
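
A hedged sketch of how a debug parameter with this split meaning is typically consumed at probe time; netif_msg_init() is the standard helper, but the exact wiring inside i40e_probe() is assumed here:

    static int debug = -1;
    module_param(debug, uint, 0);

    /* small values select a message level; a value with bit 31 set
     * (the 0x8XXXXXXX form, negative when read as an int) is passed
     * through as a raw hardware debug mask */
    pf->msg_enable = netif_msg_init(debug,
                                    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
    if (debug < -1)
        pf->hw.debug_mask = debug;
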
115 if (ha->refcount <= 0) in netdev_hw_addr_refcnt()
140 return 0; in i40e_allocate_dma_mem_d()
154 mem->pa = 0; in i40e_free_dma_mem_d()
155 mem->size = 0; in i40e_free_dma_mem_d()
157 return 0; in i40e_free_dma_mem_d()
175 return 0; in i40e_allocate_virt_mem_d()
188 mem->size = 0; in i40e_free_virt_mem_d()
190 return 0; in i40e_free_virt_mem_d()
208 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { in i40e_get_lump()
210 "param err: pile=%s needed=%d id=0x%04x\n", in i40e_get_lump()
229 i = 0; in i40e_get_lump()
238 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { in i40e_get_lump()
245 for (j = 0; j < needed; j++) in i40e_get_lump()
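
Lines 238 and 245 are the heart of a first-fit scan; a hedged reconstruction of the loop they sit in (variable and field names per the matched lines):

    while (i < pile->num_entries) {
        if (pile->list[i] & I40E_PILE_VALID_BIT) {
            i++;                 /* entry already owned, keep scanning */
            continue;
        }
        /* is this free run long enough? */
        for (j = 0; (j < needed) && ((i + j) < pile->num_entries); j++)
            if (pile->list[i + j] & I40E_PILE_VALID_BIT)
                break;
        if (j == needed) {       /* yes: stamp owner id plus valid bit */
            for (j = 0; j < needed; j++)
                pile->list[i + j] = id | I40E_PILE_VALID_BIT;
            return i;            /* base index of the lump */
        }
        i += j;                  /* no: skip past the short run */
    }
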
269 int count = 0; in i40e_put_lump()
278 pile->list[i] = 0; in i40e_put_lump()
295 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_find_vsi_from_id()
337 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_tx_timeout()
367 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", in i40e_tx_timeout()
455 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_get_netdev_stats_struct()
509 memset(ns, 0, sizeof(*ns)); in i40e_vsi_reset_stats()
510 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); in i40e_vsi_reset_stats()
511 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); in i40e_vsi_reset_stats()
512 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); in i40e_vsi_reset_stats()
513 if (vsi->rx_rings && vsi->rx_rings[0]) { in i40e_vsi_reset_stats()
514 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_reset_stats()
515 memset(&vsi->rx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
517 memset(&vsi->rx_rings[i]->rx_stats, 0, in i40e_vsi_reset_stats()
519 memset(&vsi->tx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
521 memset(&vsi->tx_rings[i]->tx_stats, 0, in i40e_vsi_reset_stats()
536 memset(&pf->stats, 0, sizeof(pf->stats)); in i40e_pf_reset_stats()
537 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); in i40e_pf_reset_stats()
540 for (i = 0; i < I40E_MAX_VEB; i++) { in i40e_pf_reset_stats()
542 memset(&pf->veb[i]->stats, 0, in i40e_pf_reset_stats()
544 memset(&pf->veb[i]->stats_offsets, 0, in i40e_pf_reset_stats()
546 memset(&pf->veb[i]->tc_stats, 0, in i40e_pf_reset_stats()
548 memset(&pf->veb[i]->tc_stats_offsets, 0, in i40e_pf_reset_stats()
553 pf->hw_csum_rx_error = 0; in i40e_pf_reset_stats()
619 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; in i40e_stat_update48()
629 *stat &= 0xFFFFFFFFFFFFULL; in i40e_stat_update48()
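
Lines 619 and 629 come from a 48-bit counter read; a hedged sketch of the rollover handling around them:

    /* delta against the snapshot taken at stats reset, kept modulo 2^48
     * so a wrapped hardware counter still yields the right difference */
    if (new_data >= *offset)
        *stat = new_data - *offset;
    else
        *stat = (new_data + BIT_ULL(48)) - *offset;
    *stat &= 0xFFFFFFFFFFFFULL;
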
774 int i, idx = 0; in i40e_update_veb_stats()
786 if (hw->revision_id > 0) in i40e_update_veb_stats()
816 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_update_veb_stats()
882 rx_b = rx_p = 0; in i40e_update_vsi_stats()
883 tx_b = tx_p = 0; in i40e_update_vsi_stats()
884 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; in i40e_update_vsi_stats()
885 tx_stopped = 0; in i40e_update_vsi_stats()
886 rx_page = 0; in i40e_update_vsi_stats()
887 rx_buf = 0; in i40e_update_vsi_stats()
888 rx_reuse = 0; in i40e_update_vsi_stats()
889 rx_alloc = 0; in i40e_update_vsi_stats()
890 rx_waive = 0; in i40e_update_vsi_stats()
891 rx_busy = 0; in i40e_update_vsi_stats()
893 for (q = 0; q < vsi->num_queue_pairs; q++) { in i40e_update_vsi_stats()
1082 for (i = 0; i < 8; i++) { in i40e_update_pf_stats()
1244 int cnt = 0; in i40e_count_filters()
1344 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1346 * to be added, then we need to update non-VLAN filters to be marked as VLAN=0
1358 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1385 * filters marked as VLAN=0 in i40e_correct_mac_vlan_filters()
1387 * which are marked as VLAN=0 must be replaced with filters in i40e_correct_mac_vlan_filters()
1396 new->f->vlan = 0; in i40e_correct_mac_vlan_filters()
1397 else if (!vlan_filters && new->f->vlan == 0) in i40e_correct_mac_vlan_filters()
1410 (!vlan_filters && f->vlan == 0)) { in i40e_correct_mac_vlan_filters()
1415 new_vlan = 0; in i40e_correct_mac_vlan_filters()
1444 return 0; in i40e_correct_mac_vlan_filters()
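
A condensed sketch of the rule the doc comment at lines 1344-1346 describes, matching the two conversions visible at lines 1396-1415 (the helper name is hypothetical):

    /* once any real VLAN filter exists, catch-all filters
     * (I40E_VLAN_ANY == -1) must become VLAN=0 so untagged and
     * priority-tagged traffic keeps flowing without also matching
     * tagged frames; removing the last VLAN filter undoes this */
    static s16 correct_vlan(s16 vlan, int vlan_filters)
    {
        if (vlan_filters && vlan == I40E_VLAN_ANY)
            return 0;
        if (!vlan_filters && vlan == 0)
            return I40E_VLAN_ANY;
        return vlan;
    }
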
1482 (is_any && !vlan_filters && f->vlan == 0)) { in i40e_get_vf_new_vlan()
1486 return 0; in i40e_get_vf_new_vlan()
1503 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1552 return 0; in i40e_correct_vf_mac_vlan_filters()
1572 memset(&element, 0, sizeof(element)); in i40e_rm_default_mac_filter()
1574 element.vlan_tag = 0; in i40e_rm_default_mac_filter()
1579 memset(&element, 0, sizeof(element)); in i40e_rm_default_mac_filter()
1581 element.vlan_tag = 0; in i40e_rm_default_mac_filter()
1617 if (vlan >= 0) in i40e_add_filter()
1750 * Returns 0 for success, or error
1768 return 0; in i40e_del_mac_filter()
1778 * Returns 0 on success, negative on failure
1794 return 0; in i40e_set_mac()
1834 return 0; in i40e_set_mac()
1849 int ret = 0; in i40e_config_rss_aq()
1890 return 0; in i40e_vsi_config_rss()
1928 u16 qcount = 0, max_qcount, qmap, sections = 0; in i40e_vsi_setup_queue_map_mqprio()
1930 u8 netdev_tc = 0, offset = 0; in i40e_vsi_setup_queue_map_mqprio()
1938 num_qps = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1948 max_qcount = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
1949 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_vsi_setup_queue_map_mqprio()
1964 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map_mqprio()
1966 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map_mqprio()
1973 /* Setup queue TC[0].qmap for given VSI context */ in i40e_vsi_setup_queue_map_mqprio()
1974 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map_mqprio()
1976 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map_mqprio()
1995 override_q = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
2000 return 0; in i40e_vsi_setup_queue_map_mqprio()
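
Lines 1973-1976 store a qmap word into the VSI context; a hedged sketch of how such a contiguous-queue map is encoded (the shift/mask names are the driver's admin-queue defines; qcount must be a power of two since only its log2 is stored):

    u16 pow = order_base_2(qcount);      /* e.g. 8 queues -> pow = 3 */
    qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
            I40E_AQ_VSI_TC_QUE_OFFSET_MASK;
    qmap |= (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
            I40E_AQ_VSI_TC_QUE_NUMBER_MASK;
    ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
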
2018 u16 num_tc_qps = 0; in i40e_vsi_setup_queue_map()
2019 u16 sections = 0; in i40e_vsi_setup_queue_map()
2020 u8 netdev_tc = 0; in i40e_vsi_setup_queue_map()
2028 offset = 0; in i40e_vsi_setup_queue_map()
2030 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); in i40e_vsi_setup_queue_map()
2042 if (vsi->req_queue_pairs > 0) in i40e_vsi_setup_queue_map()
2052 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) in i40e_vsi_setup_queue_map()
2059 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_vsi_setup_queue_map()
2080 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_vsi_setup_queue_map()
2101 WARN_ON(i != 0); in i40e_vsi_setup_queue_map()
2109 pow = 0; in i40e_vsi_setup_queue_map()
2126 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_setup_queue_map()
2128 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_setup_queue_map()
2130 qmap = 0; in i40e_vsi_setup_queue_map()
2136 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || in i40e_vsi_setup_queue_map()
2149 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_setup_queue_map()
2155 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map()
2174 return 0; in i40e_addr_sync()
2198 return 0; in i40e_addr_unsync()
2202 return 0; in i40e_addr_unsync()
2300 * number of successful filters. Note that 0 does NOT mean success!
2307 int retval = 0; in i40e_update_filter_state()
2310 for (i = 0; i < count; i++) { in i40e_update_filter_state()
2342 * be set to 0. This ensures that a sequence of calls to this function
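
The warning at line 2300 matters for callers: the helper reports how many filters the firmware accepted, so success means "return value == count". An illustrative caller check (the signature is assumed; the follow-up helper is hypothetical):

    int accepted = i40e_update_filter_state(count, add_list, add_head);
    if (accepted < count)
        handle_partial_add(count - accepted);   /* 0 accepted = total failure */
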
2465 * Returns 0 on success.
2531 * Returns 0 or error value
2540 unsigned int failed_filters = 0; in i40e_sync_vsi_filters()
2541 unsigned int vlan_filters = 0; in i40e_sync_vsi_filters()
2543 int filter_list_len = 0; in i40e_sync_vsi_filters()
2544 i40e_status aq_ret = 0; in i40e_sync_vsi_filters()
2545 u32 changed_flags = 0; in i40e_sync_vsi_filters()
2548 int num_add = 0; in i40e_sync_vsi_filters()
2549 int num_del = 0; in i40e_sync_vsi_filters()
2550 int retval = 0; in i40e_sync_vsi_filters()
2610 if (f->vlan > 0) in i40e_sync_vsi_filters()
2643 cmd_flags = 0; in i40e_sync_vsi_filters()
2659 del_list[num_del].vlan_tag = 0; in i40e_sync_vsi_filters()
2674 memset(del_list, 0, list_size); in i40e_sync_vsi_filters()
2675 num_del = 0; in i40e_sync_vsi_filters()
2703 num_add = 0; in i40e_sync_vsi_filters()
2718 if (num_add == 0) in i40e_sync_vsi_filters()
2720 cmd_flags = 0; in i40e_sync_vsi_filters()
2724 add_list[num_add].vlan_tag = 0; in i40e_sync_vsi_filters()
2730 add_list[num_add].queue_number = 0; in i40e_sync_vsi_filters()
2741 memset(add_list, 0, list_size); in i40e_sync_vsi_filters()
2742 num_add = 0; in i40e_sync_vsi_filters()
2768 vsi->active_filters = 0; in i40e_sync_vsi_filters()
2787 vsi->promisc_threshold = 0; in i40e_sync_vsi_filters()
2882 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_sync_filters_subtask()
2915 * Returns 0 on success, negative on failure
2937 return 0; in i40e_change_mtu()
2976 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) in i40e_vlan_stripping_enable()
3033 * @vid: vlan id to be added (0 = untagged only, -1 = any)
3074 return 0; in i40e_add_vlan_all_mac()
3089 /* The network stack will attempt to add VID=0, with the intention to in i40e_vsi_add_vlan()
3090 * receive priority tagged packets with a VLAN of 0. Our HW receives in i40e_vsi_add_vlan()
3093 * Additionally, HW interprets adding a VID=0 filter as meaning to in i40e_vsi_add_vlan()
3095 * Thus, we do not want to actually add a filter for VID=0 in i40e_vsi_add_vlan()
3098 return 0; in i40e_vsi_add_vlan()
3111 return 0; in i40e_vsi_add_vlan()
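
A condensed sketch of the early return those comments justify (this is the shape of the check, not the full function):

    if (!vid)
        return 0;   /* VID 0: priority-tagged RX already works by default,
                     * and a real VID=0 HW filter would mean untagged-only */
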
3117 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
3172 int ret = 0; in i40e_vlan_rx_add_vid()
3223 return 0; in i40e_vlan_rx_kill_vid()
3275 return 0; in i40e_vsi_add_pvid()
3286 vsi->info.pvid = 0; in i40e_vsi_remove_pvid()
3299 * Return 0 on success, negative on failure
3303 int i, err = 0; in i40e_vsi_setup_tx_resources()
3305 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3311 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3328 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3334 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3348 * Return 0 on success, negative on failure
3352 int i, err = 0; in i40e_vsi_setup_rx_resources()
3354 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_rx_resources()
3372 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_rx_resources()
3432 i40e_status err = 0; in i40e_configure_tx_ring()
3433 u32 qtx_ctl = 0; in i40e_configure_tx_ring()
3441 ring->atr_count = 0; in i40e_configure_tx_ring()
3443 ring->atr_sample_rate = 0; in i40e_configure_tx_ring()
3450 memset(&tx_ctx, 0, sizeof(tx_ctx)); in i40e_configure_tx_ring()
3482 tx_ctx.rdylist_act = 0; in i40e_configure_tx_ring()
3530 return 0; in i40e_configure_tx_ring()
3541 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; in i40e_rx_offset()
3557 i40e_status err = 0; in i40e_configure_rx_ring()
3564 memset(&rx_ctx, 0, sizeof(rx_ctx)); in i40e_configure_rx_ring()
3605 rx_ctx.dsize = 0; in i40e_configure_rx_ring()
3608 * rx_ctx.dtype = 0; in i40e_configure_rx_ring()
3610 rx_ctx.hsplit_0 = 0; in i40e_configure_rx_ring()
3613 if (hw->revision_id == 0) in i40e_configure_rx_ring()
3614 rx_ctx.lrxqthresh = 0; in i40e_configure_rx_ring()
3620 rx_ctx.showiv = 0; in i40e_configure_rx_ring()
3652 writel(0, ring->tail); in i40e_configure_rx_ring()
3670 return 0; in i40e_configure_rx_ring()
3681 int err = 0; in i40e_vsi_configure_tx()
3684 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3690 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3704 int err = 0; in i40e_vsi_configure_rx()
3723 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_configure_rx()
3741 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_config_dcb_rings()
3744 rx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3745 tx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3750 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { in i40e_vsi_config_dcb_rings()
3779 * Set all flow director counters to 0.
3783 pf->fd_tcp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3784 pf->fd_udp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3785 pf->fd_sctp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3786 pf->fd_ip4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3787 pf->fd_tcp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3788 pf->fd_udp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3789 pf->fd_sctp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3790 pf->fd_ip6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3851 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) in i40e_vsi_configure_msix()
3855 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { in i40e_vsi_configure_msix()
3877 for (q = 0; q < q_vector->num_ringpairs; q++) { in i40e_vsi_configure_msix()
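
A hedged illustration of the off-by-one noted at line 3851: q_vector n drives MSI-X vector n+1, so its ITR registers are indexed with vector - 1, while index 0 (PFINT_ITR0/PFINT_DYN_CTL0) stays reserved for the misc interrupt:

    /* per-vector ITR programming, roughly as the loop above does it */
    wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
         q_vector->rx.target_itr);
    wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
         q_vector->tx.target_itr);
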
3932 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ in i40e_enable_misc_int_causes()
3952 /* SW_ITR_IDX = 0, but don't change INTENA */ in i40e_enable_misc_int_causes()
3956 /* OTHER_ITR_IDX = 0 */ in i40e_enable_misc_int_causes()
3957 wr32(hw, I40E_PFINT_STAT_CTL0, 0); in i40e_enable_misc_int_causes()
3966 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0; in i40e_configure_msi_and_legacy()
3967 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_configure_msi_and_legacy()
3973 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
3977 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
3983 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ in i40e_configure_msi_and_legacy()
3984 wr32(hw, I40E_PFINT_LNKLST0, 0); in i40e_configure_msi_and_legacy()
3989 wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX)); in i40e_configure_msi_and_legacy()
3994 I40E_QINT_TQCTL_VAL(nextqp, 0, TX)); in i40e_configure_msi_and_legacy()
3998 wr32(hw, I40E_QINT_TQCTL(0), in i40e_configure_msi_and_legacy()
3999 I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX)); in i40e_configure_msi_and_legacy()
4089 int rx_int_idx = 0; in i40e_vsi_request_irq_msix()
4090 int tx_int_idx = 0; in i40e_vsi_request_irq_msix()
4095 for (vector = 0; vector < q_vectors; vector++) { in i40e_vsi_request_irq_msix()
4116 0, in i40e_vsi_request_irq_msix()
4140 return 0; in i40e_vsi_request_irq_msix()
4165 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_disable_irq()
4178 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
4185 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); in i40e_vsi_disable_irq()
4188 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_disable_irq()
4192 wr32(hw, I40E_PFINT_ICR0_ENA, 0); in i40e_vsi_disable_irq()
4193 wr32(hw, I40E_PFINT_DYN_CTL0, 0); in i40e_vsi_disable_irq()
4209 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_enable_irq()
4216 return 0; in i40e_vsi_enable_irq()
4225 /* Disable ICR 0 */ in i40e_free_misc_vector()
4226 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); in i40e_free_misc_vector()
4230 free_irq(pf->msix_entries[0].vector, pf); in i40e_free_misc_vector()
4256 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) in i40e_intr()
4260 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || in i40e_intr()
4274 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_intr()
4330 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", in i40e_intr()
4353 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", in i40e_intr()
4414 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4415 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4423 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_fdir_tx_irq()
4434 tx_buf->tx_flags = 0; in i40e_clean_fdir_tx_irq()
4436 dma_unmap_len_set(tx_buf, len, 0); in i40e_clean_fdir_tx_irq()
4437 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4438 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4447 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_fdir_tx_irq()
4460 return budget > 0; in i40e_clean_fdir_tx_irq()
4529 int v_start = 0; in i40e_vsi_map_rings_to_vectors()
4530 int qp_idx = 0; in i40e_vsi_map_rings_to_vectors()
4547 q_vector->rx.count = 0; in i40e_vsi_map_rings_to_vectors()
4548 q_vector->tx.count = 0; in i40e_vsi_map_rings_to_vectors()
4573 err = request_irq(pf->pdev->irq, i40e_intr, 0, in i40e_vsi_request_irq()
4605 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_netpoll()
4606 i40e_msix_clean_rings(0, vsi->q_vectors[i]); in i40e_netpoll()
4624 * multiple retries; otherwise returns 0 on success.
4631 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { in i40e_pf_txq_wait()
4641 return 0; in i40e_pf_txq_wait()
4665 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { in i40e_control_tx_q()
4679 wr32(hw, I40E_QTX_HEAD(pf_q), 0); in i40e_control_tx_q()
4722 int i, pf_q, ret = 0; in i40e_vsi_enable_tx()
4725 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_tx()
4753 * multiple retries; otherwise returns 0 on success.
4760 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { in i40e_pf_rxq_wait()
4770 return 0; in i40e_pf_rxq_wait()
4789 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { in i40e_control_rx_q()
4822 int ret = 0; in i40e_control_wait_rx_q()
4841 int i, pf_q, ret = 0; in i40e_vsi_enable_rx()
4844 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_rx()
4863 int ret = 0; in i40e_vsi_start_rings()
4904 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); in i40e_vsi_stop_rings()
4926 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_stop_rings_no_wait()
4952 for (i = 0; i < vsi->num_q_vectors; i++) { in i40e_vsi_free_irq()
5095 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) in i40e_vsi_free_q_vectors()
5135 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); in i40e_clear_interrupt_scheme()
5136 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_clear_interrupt_scheme()
5153 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_enable_all()
5172 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_disable_all()
5192 vsi->current_netdev_flags = 0; in i40e_vsi_close()
5237 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_quiesce_all_vsi()
5251 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_unquiesce_all_vsi()
5269 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_wait_queues_disabled()
5302 return 0; in i40e_vsi_wait_queues_disabled()
5315 int v, ret = 0; in i40e_pf_wait_queues_disabled()
5317 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { in i40e_pf_wait_queues_disabled()
5346 for (i = 0; i < dcbcfg->numapps; i++) { in i40e_get_iscsi_tc_map()
5367 int i, tc_unused = 0; in i40e_dcb_get_num_tc()
5368 u8 num_tc = 0; in i40e_dcb_get_num_tc()
5369 u8 ret = 0; in i40e_dcb_get_num_tc()
5375 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) in i40e_dcb_get_num_tc()
5381 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_dcb_get_num_tc()
5414 for (i = 0; i < num_tc; i++) in i40e_dcb_get_enabled_tc()
5448 u8 num_tc = 0; in i40e_pf_get_num_tc()
5468 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_pf_get_num_tc()
5507 * Returns 0 on success, negative value on failure
5511 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; in i40e_vsi_get_bw_info()
5512 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; in i40e_vsi_get_bw_info()
5542 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", in i40e_vsi_get_bw_info()
5550 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | in i40e_vsi_get_bw_info()
5552 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_vsi_get_bw_info()
5557 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); in i40e_vsi_get_bw_info()
5560 return 0; in i40e_vsi_get_bw_info()
5569 * Returns 0 on success, negative value on failure
5581 return 0; in i40e_vsi_configure_bw_alloc()
5583 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); in i40e_vsi_configure_bw_alloc()
5590 memset(&bw_data, 0, sizeof(bw_data)); in i40e_vsi_configure_bw_alloc()
5592 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) in i40e_vsi_configure_bw_alloc()
5603 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) in i40e_vsi_configure_bw_alloc()
5606 return 0; in i40e_vsi_configure_bw_alloc()
5620 u8 netdev_tc = 0; in i40e_vsi_config_netdev_tc()
5637 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_vsi_config_netdev_tc()
5641 * enabled_tc bitmap would be 0x00001001; the driver in i40e_vsi_config_netdev_tc()
5643 * referenced by the netdev layer as TC 0 and 1. in i40e_vsi_config_netdev_tc()
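
A worked example of the mapping that comment describes (note the bitmap there is effectively written in binary: TC0 plus TC3 enabled is 0b1001, i.e. 0x9):

    /* illustrative: compress sparse HW TCs into dense netdev TCs */
    u8 netdev_tc = 0;
    for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
        if (!(vsi->tc_config.enabled_tc & BIT(i)))
            continue;                     /* hits i = 0 and i = 3 */
        /* HW TC0 -> netdev TC0, HW TC3 -> netdev TC1 */
        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
    }
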
5656 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { in i40e_vsi_config_netdev_tc()
5731 vsi->info.valid_sections = 0; in i40e_update_adq_vsi_queues()
5751 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; in i40e_vsi_config_tc()
5755 int ret = 0; in i40e_vsi_config_tc()
5764 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_vsi_config_tc()
5771 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; in i40e_vsi_config_tc()
5793 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n", in i40e_vsi_config_tc()
5810 ctxt.vf_num = 0; in i40e_vsi_config_tc()
5854 vsi->info.valid_sections = 0; in i40e_vsi_config_tc()
5928 u64 credits = 0; in i40e_set_bw_limit()
5929 int speed = 0; in i40e_set_bw_limit()
5930 int ret = 0; in i40e_set_bw_limit()
5945 /* Tx rate credits are in values of 50Mbps, 0 is disabled */ in i40e_set_bw_limit()
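
A hedged sketch of the conversion stated in that comment, with a worked value: a 1000 Mbps cap becomes 1000 / 50 = 20 credits (the accumulation constant in the last argument is assumed from the driver's headers):

    u64 credits = max_tx_rate;
    do_div(credits, I40E_BW_CREDIT_DIVISOR);   /* 50 Mbps per credit */
    ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
                                      I40E_MAX_BW_INACTIVE_ACCUM, NULL);
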
5976 vsi->current_rss_size = 0; in i40e_remove_queue_channels()
5992 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_remove_queue_channels()
6005 ret = i40e_set_bw_limit(vsi, ch->seid, 0); in i40e_remove_queue_channels()
6056 int max = 0; in i40e_get_max_queues_for_channel()
6119 return 0; in i40e_validate_num_queues()
6191 u16 qcount, qmap, sections = 0; in i40e_channel_setup_queue_map()
6192 u8 offset = 0; in i40e_channel_setup_queue_map()
6209 /* Setup queue TC[0].qmap for given VSI context */ in i40e_channel_setup_queue_map()
6210 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_channel_setup_queue_map()
6212 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ in i40e_channel_setup_queue_map()
6214 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); in i40e_channel_setup_queue_map()
6231 u8 enabled_tc = 0x1; /* TC0 enabled */ in i40e_add_channel()
6240 memset(&ctxt, 0, sizeof(ctxt)); in i40e_add_channel()
6242 ctxt.vf_num = 0; in i40e_add_channel()
6287 return 0; in i40e_add_channel()
6297 memset(&bw_data, 0, sizeof(bw_data)); in i40e_channel_config_bw()
6299 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) in i40e_channel_config_bw()
6311 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) in i40e_channel_config_bw()
6314 return 0; in i40e_channel_config_bw()
6332 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; in i40e_channel_config_tx_ring()
6335 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_channel_config_tx_ring()
6349 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_channel_config_tx_ring()
6366 return 0; in i40e_channel_config_tx_ring()
6485 return 0; in i40e_validate_and_set_switch_mode()
6618 return 0; in i40e_create_queue_channel()
6630 u64 max_rate = 0; in i40e_configure_queue_channels()
6631 int ret = 0, i; in i40e_configure_queue_channels()
6634 vsi->tc_seid_map[0] = vsi->seid; in i40e_configure_queue_channels()
6687 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; in i40e_veb_config_tc()
6689 int ret = 0; in i40e_veb_config_tc()
6700 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_veb_config_tc()
6739 u8 tc_map = 0; in i40e_dcb_reconfigure()
6748 for (v = 0; v < I40E_MAX_VEB; v++) { in i40e_dcb_reconfigure()
6761 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_dcb_reconfigure()
6856 return 0; in i40e_hw_set_dcb_config()
6901 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0}; in i40e_hw_dcb_config()
6910 u8 lltc_map = 0; in i40e_hw_dcb_config()
6911 u8 tc_map = 0; in i40e_hw_dcb_config()
6933 memset(&ets_data, 0, sizeof(ets_data)); in i40e_hw_dcb_config()
6934 for (i = 0; i < new_numtc; i++) { in i40e_hw_dcb_config()
7006 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_hw_dcb_config()
7080 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); in i40e_dcb_sw_default_config()
7082 pf->tmp_cfg.etscfg.maxtcs = 0; in i40e_dcb_sw_default_config()
7083 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7084 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; in i40e_dcb_sw_default_config()
7089 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE; in i40e_dcb_sw_default_config()
7090 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO; in i40e_dcb_sw_default_config()
7091 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE; in i40e_dcb_sw_default_config()
7096 memset(&ets_data, 0, sizeof(ets_data)); in i40e_dcb_sw_default_config()
7098 ets_data.tc_strict_priority_flags = 0; /* ETS */ in i40e_dcb_sw_default_config()
7099 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */ in i40e_dcb_sw_default_config()
7116 dcb_cfg->etscfg.cbs = 0; in i40e_dcb_sw_default_config()
7118 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7368 pf->fd_add_err = 0; in i40e_up_complete()
7369 pf->fd_atr_cnt = 0; in i40e_up_complete()
7379 return 0; in i40e_up_complete()
7409 struct i40e_aq_set_phy_config config = {0}; in i40e_force_link_state()
7450 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) in i40e_force_link_state()
7459 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; in i40e_force_link_state()
7461 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0; in i40e_force_link_state()
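
Lines 7459 and 7461 split a 40-bit PHY capability mask across two admin-queue fields; schematically:

    /* illustrative: low 32 bits -> phy_type, bits 32..39 -> phy_type_ext */
    config.phy_type     = cpu_to_le32((u32)(mask & 0xffffffff));
    config.phy_type_ext = (u8)((mask >> 32) & 0xff);
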
7470 if (abilities.link_speed != 0) in i40e_force_link_state()
7548 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_down()
7570 u64 sum_max_rate = 0; in i40e_validate_mqprio_qopt()
7571 u64 max_rate = 0; in i40e_validate_mqprio_qopt()
7574 if (mqprio_qopt->qopt.offset[0] != 0 || in i40e_validate_mqprio_qopt()
7578 for (i = 0; ; i++) { in i40e_validate_mqprio_qopt()
7583 "Invalid min tx rate (greater than 0) specified\n"); in i40e_validate_mqprio_qopt()
7607 return 0; in i40e_validate_mqprio_qopt()
7624 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_vsi_set_default_tc_config()
7628 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_set_default_tc_config()
7629 if (i == 0) in i40e_vsi_set_default_tc_config()
7633 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_set_default_tc_config()
7645 * macvlan. Returns 0 on success.
7653 memset(&element, 0, sizeof(element)); in i40e_del_macvlan_filter()
7655 element.vlan_tag = 0; in i40e_del_macvlan_filter()
7671 * macvlan. Returns 0 on success.
7678 u16 cmd_flags = 0; in i40e_add_macvlan_filter()
7681 element.vlan_tag = 0; in i40e_add_macvlan_filter()
7682 element.queue_number = 0; in i40e_add_macvlan_filter()
7703 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_reset_ch_rings()
7735 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_free_macvlan_channels()
7756 vsi->macvlan_cnt = 0; in i40e_free_macvlan_channels()
7769 int ret = 0, num_tc = 1, i, aq_err; in i40e_fwd_ring_up()
7778 for (i = 0; i < num_tc; i++) in i40e_fwd_ring_up()
7783 for (i = 0; i < iter->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7815 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7848 int i, pow, ret = 0; in i40e_setup_macvlans()
7849 u8 offset = 0; in i40e_setup_macvlans()
7865 memset(&ctxt, 0, sizeof(ctxt)); in i40e_setup_macvlans()
7868 ctxt.vf_num = 0; in i40e_setup_macvlans()
7871 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_setup_macvlans()
7873 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_setup_macvlans()
7904 vsi->info.valid_sections = 0; in i40e_setup_macvlans()
7908 for (i = 0; i < macvlan_cnt; i++) { in i40e_setup_macvlans()
7944 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors; in i40e_fwd_add()
7970 /* reserve bit 0 for the pf device */ in i40e_fwd_add()
7971 set_bit(0, vsi->fwd_bitmask); in i40e_fwd_add()
8000 if (macvlan_cnt == 0) in i40e_fwd_add()
8038 netdev_set_sb_channel(vdev, 0); in i40e_fwd_add()
8056 int aq_err, ret = 0; in i40e_del_all_macvlans()
8072 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_del_all_macvlans()
8093 int aq_err, ret = 0; in i40e_fwd_del()
8108 netdev_set_sb_channel(fwd->netdev, 0); in i40e_fwd_del()
8133 u8 enabled_tc = 0, num_tc, hw; in i40e_setup_tc()
8196 for (i = 0; i < num_tc; i++) in i40e_setup_tc()
8202 return 0; in i40e_setup_tc()
8218 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { in i40e_setup_tc()
8221 vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8229 vsi->seid, vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8232 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_setup_tc()
8234 vsi->mqprio_qopt.max_rate[0]); in i40e_setup_tc()
8287 memset(cld, 0, sizeof(*cld)); in i40e_set_cld_element()
8296 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) { in i40e_set_cld_element()
8323 * Returns 0 if the filter was successfully added.
8351 memset(&cld_filter, 0, sizeof(cld_filter)); in i40e_add_del_cloud_filter()
8392 * Returns 0 if the filter was successfully added.
8421 memset(&cld_filter, 0, sizeof(cld_filter)); in i40e_add_del_cloud_filter_big_buf()
8508 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; in i40e_parse_cls_flower()
8510 u8 field_flags = 0; in i40e_parse_cls_flower()
8521 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", in i40e_parse_cls_flower()
8530 if (match.mask->keyid != 0) in i40e_parse_cls_flower()
8544 n_proto_key = 0; in i40e_parse_cls_flower()
8545 n_proto_mask = 0; in i40e_parse_cls_flower()
8556 /* use is_broadcast and is_zero to check for all 0xf or 0 */ in i40e_parse_cls_flower()
8589 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", in i40e_parse_cls_flower()
8610 if (match.mask->dst == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8620 if (match.mask->src == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8643 * (0:0:0:0:0:0:0:1), which can be represented as ::1 in i40e_parse_cls_flower()
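
A hedged sketch of the rejection that comment motivates; ipv6_addr_loopback() is the standard helper, and the error code here is an assumption styled after the nearby checks:

    if (ipv6_addr_loopback(&match.key->dst) ||
        ipv6_addr_loopback(&match.key->src)) {
        dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n");
        return I40E_ERR_CONFIG;   /* error code assumed */
    }
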
8666 if (match.mask->src == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8669 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", in i40e_parse_cls_flower()
8676 if (match.mask->dst == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8679 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", in i40e_parse_cls_flower()
8699 return 0; in i40e_parse_cls_flower()
8715 if (tc == 0) { in i40e_handle_tclass()
8717 return 0; in i40e_handle_tclass()
8731 return 0; in i40e_handle_tclass()
8749 int err = 0; in i40e_configure_clsflower()
8751 if (tc < 0) { in i40e_configure_clsflower()
8786 if (err < 0) in i40e_configure_clsflower()
8790 if (err < 0) in i40e_configure_clsflower()
8848 int err = 0; in i40e_delete_clsflower()
8878 return 0; in i40e_delete_clsflower()
8950 * Returns 0 on success, negative value on failure
8982 return 0; in i40e_open()
9012 * Returns 0 on success, negative value on failure
9063 return 0; in i40e_vsi_open()
9110 pf->fdir_pf_active_filters = 0; in i40e_fdir_filter_exit()
9175 pf->num_cloud_filters = 0; in i40e_cloud_filter_exit()
9193 * Returns 0; this is not allowed to fail
9202 return 0; in i40e_close()
9279 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9292 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
9304 "bad reset request 0x%08x\n", reset_flags); in i40e_do_reset()
9377 int ret = 0; in i40e_handle_lldp_event()
9395 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); in i40e_handle_lldp_event()
9415 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); in i40e_handle_lldp_event()
9520 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", in i40e_handle_lan_overflow_event()
9624 pf->fd_inv = 0; in i40e_delete_invalid_filter()
9701 (pf->fd_add_err == 0) || in i40e_fdir_check_and_reenable()
9710 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0) in i40e_fdir_check_and_reenable()
9714 if (pf->fd_inv > 0) { in i40e_fdir_check_and_reenable()
9761 pf->fd_add_err = 0; in i40e_fdir_flush_and_replay()
9859 for (i = 0; i < I40E_MAX_VEB; i++) in i40e_veb_link_event()
9864 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_veb_link_event()
9937 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg)); in i40e_link_event()
9978 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_watchdog_subtask()
9984 for (i = 0; i < I40E_MAX_VEB; i++) in i40e_watchdog_subtask()
9999 u32 reset_flags = 0; in i40e_reset_subtask()
10088 u16 pending, i = 0; in i40e_clean_adminq_subtask()
10189 "ARQ NVM operation 0x%04x completed\n", in i40e_clean_adminq_subtask()
10194 "ARQ: Unknown event 0x%04x ignored\n", in i40e_clean_adminq_subtask()
10251 ctxt.vf_num = 0; in i40e_enable_pf_switch_lb()
10287 ctxt.vf_num = 0; in i40e_disable_pf_switch_lb()
10347 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { in i40e_reconstitute_veb()
10384 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_reconstitute_veb()
10404 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { in i40e_reconstitute_veb()
10469 "switch_mode=0x%04x, function_valid=0x%08x\n", in i40e_get_capabilities()
10484 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ in i40e_get_capabilities()
10486 if (pf->hw.revision_id == 0 && in i40e_get_capabilities()
10494 return 0; in i40e_get_capabilities()
10510 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { in i40e_fdir_sb_setup()
10512 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, in i40e_fdir_sb_setup()
10513 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, in i40e_fdir_sb_setup()
10514 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, in i40e_fdir_sb_setup()
10515 0x95b3a76d}; in i40e_fdir_sb_setup()
10518 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) in i40e_fdir_sb_setup()
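
Line 10518 presumably loops the static fallback key into the global hash registers once line 10510 finds them unprogrammed; a hedged completion of that loop:

    for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
        wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
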
10531 pf->vsi[pf->lan_vsi]->seid, 0); in i40e_fdir_sb_setup()
10593 return 0; in i40e_rebuild_cloud_filters()
10608 return 0; in i40e_rebuild_channels()
10654 return 0; in i40e_rebuild_channels()
10666 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_clean_xps_state()
10681 i40e_status ret = 0; in i40e_prep_for_reset()
10695 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_prep_for_reset()
10698 pf->vsi[v]->seid = 0; in i40e_prep_for_reset()
10726 dv.major_version = 0xff; in i40e_send_version()
10727 dv.minor_version = 0xff; in i40e_send_version()
10728 dv.build_version = 0xff; in i40e_send_version()
10729 dv.subbuild_version = 0; in i40e_send_version()
10740 u16 block_offset = 0xffff; in i40e_get_oem_version()
10741 u16 block_length = 0; in i40e_get_oem_version()
10742 u16 capabilities = 0; in i40e_get_oem_version()
10743 u16 gen_snap = 0; in i40e_get_oem_version()
10744 u16 release = 0; in i40e_get_oem_version()
10746 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B in i40e_get_oem_version()
10747 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00 in i40e_get_oem_version()
10748 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01 in i40e_get_oem_version()
10749 #define I40E_NVM_OEM_GEN_OFFSET 0x02 in i40e_get_oem_version()
10750 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03 in i40e_get_oem_version()
10751 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F in i40e_get_oem_version()
10756 if (block_offset == 0xffff) in i40e_get_oem_version()
10768 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0) in i40e_get_oem_version()
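
A hedged sketch of how the words read at the offsets defined above are assembled into an OEM version; the real driver uses named shift macros, so the literal 16 is an assumption about the layout:

    i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET, &gen_snap);
    i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET, &release);
    hw->nvm.oem_ver = ((u32)gen_snap << 16) | release;   /* gen/snap : release */
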
10886 hw->func_caps.num_rx_qp, 0, 0); in i40e_rebuild()
10953 for (v = 0; v < I40E_MAX_VEB; v++) { in i40e_rebuild()
10958 pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10976 } else if (pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10996 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_rebuild()
10998 vsi->mqprio_qopt.max_rate[0]); in i40e_rebuild()
10999 u64 credits = 0; in i40e_rebuild()
11029 #define I40E_REG_MSS 0x000E64DC in i40e_rebuild()
11030 #define I40E_REG_MSS_MIN_MASK 0x3FF0000 in i40e_rebuild()
11031 #define I40E_64BYTE_MSS 0x400000 in i40e_rebuild()
11163 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", in i40e_handle_mdd_event()
11165 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); in i40e_handle_mdd_event()
11178 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", in i40e_handle_mdd_event()
11180 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); in i40e_handle_mdd_event()
11187 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); in i40e_handle_mdd_event()
11192 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); in i40e_handle_mdd_event()
11198 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { in i40e_handle_mdd_event()
11202 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); in i40e_handle_mdd_event()
11213 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); in i40e_handle_mdd_event()
11368 return 0; in i40e_set_num_rings_in_vsi()
11377 * On success: returns 0
11383 int ret = 0; in i40e_vsi_alloc_arrays()
11442 i = 0; in i40e_vsi_mem_alloc()
11463 vsi->flags = 0; in i40e_vsi_mem_alloc()
11465 vsi->int_rate_limit = 0; in i40e_vsi_mem_alloc()
11511 * On success: returns 0
11552 return 0; in i40e_vsi_clear()
11591 return 0; in i40e_vsi_clear()
11602 if (vsi->tx_rings && vsi->tx_rings[0]) { in i40e_vsi_clear_rings()
11603 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_vsi_clear_rings()
11624 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_alloc_rings()
11637 ring->size = 0; in i40e_alloc_rings()
11638 ring->dcb_tc = 0; in i40e_alloc_rings()
11654 ring->size = 0; in i40e_alloc_rings()
11655 ring->dcb_tc = 0; in i40e_alloc_rings()
11670 ring->size = 0; in i40e_alloc_rings()
11671 ring->dcb_tc = 0; in i40e_alloc_rings()
11676 return 0; in i40e_alloc_rings()
11694 if (vectors < 0) { in i40e_reserve_msix_vectors()
11697 vectors = 0; in i40e_reserve_msix_vectors()
11718 int iwarp_requested = 0; in i40e_init_msix()
11739 v_budget = 0; in i40e_init_msix()
11765 pf->num_fdsb_msix = 0; in i40e_init_msix()
11774 pf->num_iwarp_msix = 0; in i40e_init_msix()
11784 pf->num_vmdq_msix = 0; in i40e_init_msix()
11785 pf->num_vmdq_qps = 0; in i40e_init_msix()
11825 WARN(vectors_left < 0, in i40e_init_msix()
11834 for (i = 0; i < v_budget; i++) in i40e_init_msix()
11847 pf->num_vmdq_vsis = 0; in i40e_init_msix()
11848 pf->num_vmdq_qps = 0; in i40e_init_msix()
11907 (pf->num_fdsb_msix == 0)) { in i40e_init_msix()
11913 (pf->num_vmdq_msix == 0)) { in i40e_init_msix()
11919 (pf->num_iwarp_msix == 0)) { in i40e_init_msix()
11959 return 0; in i40e_vsi_alloc_q_vector()
11982 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { in i40e_vsi_alloc_q_vectors()
11988 return 0; in i40e_vsi_alloc_q_vectors()
12003 int vectors = 0; in i40e_init_interrupt_scheme()
12008 if (vectors < 0) { in i40e_init_interrupt_scheme()
12029 if (vectors < 0) { in i40e_init_interrupt_scheme()
12051 return 0; in i40e_init_interrupt_scheme()
12079 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_restore_interrupt_scheme()
12095 return 0; in i40e_restore_interrupt_scheme()
12111 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
12130 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; in i40e_setup_misc_vector_for_recovery_mode()
12145 return 0; in i40e_setup_misc_vector_for_recovery_mode()
12152 * This sets up the handler for MSIX 0, which is used to manage the
12159 int err = 0; in i40e_setup_misc_vector()
12163 err = request_irq(pf->msix_entries[0].vector, in i40e_setup_misc_vector()
12164 i40e_intr, 0, pf->int_name, pf); in i40e_setup_misc_vector()
12194 * Return 0 on success, negative on failure
12201 int ret = 0; in i40e_get_rss_aq()
12240 * Returns 0 on success, negative on failure
12255 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) in i40e_config_rss_reg()
12258 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) in i40e_config_rss_reg()
12271 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) in i40e_config_rss_reg()
12276 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) in i40e_config_rss_reg()
12284 return 0; in i40e_config_rss_reg()
12294 * Returns 0 on success, negative on failure
12306 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) in i40e_get_rss_reg()
12314 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) in i40e_get_rss_reg()
12318 return 0; in i40e_get_rss_reg()
12328 * Returns 0 on success, negative on failure
12347 * Returns 0 on success, negative on failure
12371 for (i = 0; i < rss_table_size; i++) in i40e_fill_rss_lut()
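
The loop at line 12371 is the classic round-robin LUT fill:

    for (i = 0; i < rss_table_size; i++)
        lut[i] = i % rss_size;   /* spread table entries across queues */
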
12390 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | in i40e_pf_config_rss()
12394 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); in i40e_pf_config_rss()
12447 * returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
12457 return 0; in i40e_reconfig_rss_queues()
12526 memset(&bw_data, 0, sizeof(bw_data)); in i40e_set_partition_bw_setting()
12569 /* Read word 0x10 of NVM - SW compatibility word 1 */ in i40e_commit_partition_bw_setting()
12572 0x10, sizeof(nvm_word), &nvm_word, in i40e_commit_partition_bw_setting()
12605 0x10, sizeof(nvm_word), in i40e_commit_partition_bw_setting()
12606 &nvm_word, true, 0, NULL); in i40e_commit_partition_bw_setting()
12630 #define I40E_FEATURES_ENABLE_PTR 0x2A in i40e_is_total_port_shutdown_enabled()
12631 #define I40E_CURRENT_SETTING_PTR 0x2B in i40e_is_total_port_shutdown_enabled()
12632 #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D in i40e_is_total_port_shutdown_enabled()
12633 #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 in i40e_is_total_port_shutdown_enabled()
12634 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) in i40e_is_total_port_shutdown_enabled()
12637 u16 sr_emp_sr_settings_ptr = 0; in i40e_is_total_port_shutdown_enabled()
12638 u16 features_enable = 0; in i40e_is_total_port_shutdown_enabled()
12639 u16 link_behavior = 0; in i40e_is_total_port_shutdown_enabled()
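
A hedged sketch of the NVM walk the defines above imply; the name of the module pointer dereferenced first is an assumption, and so is the exact flow:

    i40e_read_nvm_word(&pf->hw, I40E_SR_EMP_SR_SETTINGS_PTR,
                       &sr_emp_sr_settings_ptr);
    i40e_read_nvm_word(&pf->hw,
                       sr_emp_sr_settings_ptr + I40E_FEATURES_ENABLE_PTR,
                       &features_enable);
    /* if the total-port-shutdown feature bit is set, read the link
     * behavior word at I40E_LINK_BEHAVIOR_WORD_OFFSET and test
     * I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED for this port */
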
12684 int err = 0; in i40e_sw_init()
12733 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || in i40e_sw_init()
12734 (pf->hw.func_caps.fd_filters_best_effort > 0)) { in i40e_sw_init()
12762 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 in i40e_sw_init()
12800 /* Enable PTP L4 if FW > v6.0 */ in i40e_sw_init()
12835 pf->eeprom_version = 0xDEAD; in i40e_sw_init()
12891 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { in i40e_set_ntuple()
12906 pf->fd_add_err = 0; in i40e_set_ntuple()
12907 pf->fd_atr_cnt = 0; in i40e_set_ntuple()
12929 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) in i40e_clear_rss_lut()
12930 wr32(hw, I40E_PFQF_HLUT(i), 0); in i40e_clear_rss_lut()
12932 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) in i40e_clear_rss_lut()
12933 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0); in i40e_clear_rss_lut()
12979 return 0; in i40e_set_features()
13004 return 0; in i40e_udp_tunnel_set_port()
13023 return 0; in i40e_udp_tunnel_unset_port()
13039 return 0; in i40e_get_phys_port_id()
13060 int err = 0; in i40e_ndo_fdb_add()
13087 err = 0; in i40e_ndo_fdb_add()
13125 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { in i40e_ndo_bridge_setlink()
13145 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
13168 return 0; in i40e_ndo_bridge_setlink()
13199 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { in i40e_ndo_bridge_getlink()
13205 return 0; in i40e_ndo_bridge_getlink()
13208 0, 0, nlflags, filter_mask, NULL); in i40e_ndo_bridge_getlink()
13317 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13327 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13332 return 0; in i40e_xdp_setup()
13339 * Returns 0 on success, <0 for failure.
13353 return 0; in i40e_enter_busy_conf()
13374 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, in i40e_queue_pair_reset_stats()
13376 memset(&vsi->tx_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13379 memset(&vsi->xdp_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13432 * Returns 0 on success, <0 on failure.
13438 int pf_q, ret = 0; in i40e_queue_pair_toggle_rings()
13520 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); in i40e_queue_pair_disable_irq()
13525 wr32(hw, I40E_PFINT_ICR0_ENA, 0); in i40e_queue_pair_disable_irq()
13526 wr32(hw, I40E_PFINT_DYN_CTL0, 0); in i40e_queue_pair_disable_irq()
13537 * Returns 0 on success, <0 on failure.
13562 * Returns 0 on success, <0 on failure.
13659 * Returns 0 on success, negative value on failure
13700 0; in i40e_config_netdev()
13767 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to in i40e_config_netdev()
13818 return 0; in i40e_config_netdev()
13825 * Returns 0 on success, negative value on failure
13840 * Returns 1 if HW bridge mode is VEB and 0 in case of VEPA mode
13860 return 0; in i40e_is_vsi_uplink_mode_veb()
13866 /* VEPA is now default bridge, so return 0 */ in i40e_is_vsi_uplink_mode_veb()
13867 return 0; in i40e_is_vsi_uplink_mode_veb()
13887 u8 enabled_tc = 0x1; /* TC0 enabled */ in i40e_add_vsi()
13888 int f_count = 0; in i40e_add_vsi()
13890 memset(&ctxt, 0, sizeof(ctxt)); in i40e_add_vsi()
13900 ctxt.vf_num = 0; in i40e_add_vsi()
13912 vsi->info.valid_sections = 0; in i40e_add_vsi()
13924 memset(&ctxt, 0, sizeof(ctxt)); in i40e_add_vsi()
13927 ctxt.vf_num = 0; in i40e_add_vsi()
13947 memset(&ctxt, 0, sizeof(ctxt)); in i40e_add_vsi()
13950 ctxt.vf_num = 0; in i40e_add_vsi()
13964 vsi->info.valid_sections = 0; in i40e_add_vsi()
13978 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", in i40e_add_vsi()
13989 ctxt.vf_num = 0; in i40e_add_vsi()
14005 ctxt.vf_num = 0; in i40e_add_vsi()
14082 vsi->info.valid_sections = 0; in i40e_add_vsi()
14087 vsi->active_filters = 0; in i40e_add_vsi()
14110 ret = 0; in i40e_add_vsi()
14121 * Returns 0 on success or < 0 on error
14193 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_release()
14196 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
14200 for (i = 0; i < I40E_MAX_VEB; i++) { in i40e_vsi_release()
14208 if (n == 0 && veb && veb->uplink_seid != 0) in i40e_vsi_release()
14211 return 0; in i40e_vsi_release()
14222 * Returns 0 on success or negative on failure
14229 if (vsi->q_vectors[0]) { in i40e_vsi_setup_vectors()
14246 vsi->num_q_vectors = 0; in i40e_vsi_setup_vectors()
14258 if (vsi->base_vector < 0) { in i40e_vsi_setup_vectors()
14305 if (ret < 0) { in i40e_vsi_reinit_setup()
14317 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
14381 for (i = 0; i < I40E_MAX_VEB; i++) { in i40e_vsi_setup()
14390 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_setup()
14403 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14405 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_vsi_setup()
14406 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14424 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { in i40e_vsi_setup()
14439 if (v_idx < 0) in i40e_vsi_setup()
14456 if (ret < 0) { in i40e_vsi_setup()
14546 int ret = 0; in i40e_veb_get_bw_info()
14573 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | in i40e_veb_get_bw_info()
14575 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { in i40e_veb_get_bw_info()
14579 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); in i40e_veb_get_bw_info()
14608 i = 0; in i40e_veb_mem_alloc()
14647 for (i = 0; i < I40E_MAX_VEB; i++) { in i40e_switch_branch_release()
14659 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_switch_branch_release()
14663 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
14706 int i, n = 0; in i40e_veb_release()
14711 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_veb_release()
14790 return 0; in i40e_add_veb()
14817 /* if one seid is 0, the other must be 0 to create a floating relay */ in i40e_veb_setup()
14818 if ((uplink_seid == 0 || vsi_seid == 0) && in i40e_veb_setup()
14819 (uplink_seid + vsi_seid != 0)) { in i40e_veb_setup()
14821 "one, not both seid's are 0: uplink=%d vsi=%d\n", in i40e_veb_setup()
14827 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) in i40e_veb_setup()
14830 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { in i40e_veb_setup()
14837 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { in i40e_veb_setup()
14853 if (veb_idx < 0) in i40e_veb_setup()
14859 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); in i40e_veb_setup()
14911 for (v = 0; v < I40E_MAX_VEB; v++) { in i40e_setup_pf_switch_element()
14919 if (v < 0) in i40e_setup_pf_switch_element()
14972 u16 next_seid = 0; in i40e_fetch_switch_configuration()
14973 int ret = 0; in i40e_fetch_switch_configuration()
15006 for (i = 0; i < num_reported; i++) { in i40e_fetch_switch_configuration()
15013 } while (next_seid != 0); in i40e_fetch_switch_configuration()
15025 * Returns 0 on success, negative value on failure
15029 u16 flags = 0; in i40e_setup_pf_switch()
15049 if ((pf->hw.pf_id == 0) && in i40e_setup_pf_switch()
15055 if (pf->hw.pf_id == 0) { in i40e_setup_pf_switch()
15059 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, in i40e_setup_pf_switch()
15085 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); in i40e_setup_pf_switch()
15098 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
15150 pf->num_lan_qps = 0; in i40e_determine_queue_usage()
15161 queues_left = 0; in i40e_determine_queue_usage()
15250 * Returns 0 on success, negative on failure
15269 return 0; in i40e_setup_pf_filter_control()
15350 if (fec_cfg == 0) in i40e_set_fec_in_flags()
15399 * Return 0 on success, negative on failure.
15450 * Return 0 if NIC is healthy or negative value when there are issues
15472 * Returns 0 on success, negative on failure
15483 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_init_recovery_mode()
15515 if (v_idx < 0) { in i40e_init_recovery_mode()
15546 return 0; in i40e_init_recovery_mode()
15586 * Returns 0 on success, negative on failure
15612 "DMA configuration failed: 0x%x\n", err); in i40e_probe()
15637 pf->next_vsi = 0; in i40e_probe()
15644 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), in i40e_probe()
15652 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", in i40e_probe()
15657 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); in i40e_probe()
15660 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", in i40e_probe()
15661 (unsigned int)pci_resource_start(pdev, 0), in i40e_probe()
15678 hw->switch_tag = 0xffff; in i40e_probe()
15700 if (hw->revision_id == 0 && in i40e_probe()
15793 /* Rev 0 hardware was never productized */ in i40e_probe()
15813 hw->func_caps.num_rx_qp, 0, 0); in i40e_probe()
15876 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_probe()
15908 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; in i40e_probe()
15909 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | in i40e_probe()
15953 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_probe()
16041 if (pf->iwarp_base_vector < 0) { in i40e_probe()
16136 #define MAX_FRAME_SIZE_DEFAULT 0x2600 in i40e_probe()
16160 return 0; in i40e_probe()
16211 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); in i40e_remove()
16212 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); in i40e_remove()
16236 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove()
16258 for (i = 0; i < I40E_MAX_VEB; i++) { in i40e_remove()
16263 pf->veb[i]->uplink_seid == 0) in i40e_remove()
16293 /* Free MSI/legacy interrupt 0 when in recovery mode. */ in i40e_remove()
16308 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_remove()
16318 for (i = 0; i < I40E_MAX_VEB; i++) { in i40e_remove()
16391 if (reg == 0) in i40e_pci_error_slot_reset()
16453 u16 flags = 0; in i40e_enable_mc_magic_wake()
16518 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_shutdown()
16520 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_shutdown()
16522 /* Free MSI/legacy interrupt 0 when in recovery mode. */ in i40e_shutdown()
16552 return 0; in i40e_suspend()
16576 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_suspend()
16577 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_suspend()
16588 return 0; in i40e_suspend()
16602 return 0; in i40e_resume()
16630 return 0; in i40e_resume()
16676 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); in i40e_init_module()
16690 return 0; in i40e_init_module()