Lines matching full:pf — whole-identifier matches for pf (the i40e board-private structure) in the driver source, listed with their source line numbers and enclosing functions.
31 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
34 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
35 static int i40e_setup_misc_vector(struct i40e_pf *pf);
36 static void i40e_determine_queue_usage(struct i40e_pf *pf);
37 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
38 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
39 static int i40e_reset(struct i40e_pf *pf);
40 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
41 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
42 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
43 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
44 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
45 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
47 static int i40e_get_capabilities(struct i40e_pf *pf,
49 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
109 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_allocate_dma_mem_d() local
112 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, in i40e_allocate_dma_mem_d()
127 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_free_dma_mem_d() local
129 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); in i40e_free_dma_mem_d()
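
The two DMA helpers above recover the board-private structure from the opaque hw->back pointer before touching pf->pdev. Below is a minimal userspace sketch of that back-pointer pattern; the structure and field names are illustrative, not the driver's own.

#include <stdio.h>
#include <stdlib.h>

/* Opaque lower-layer handle; 'back' points at the owning adapter. */
struct hw_handle {
    void *back;
};

/* Board-private structure that embeds the hardware handle. */
struct adapter {
    struct hw_handle hw;
    const char *pci_name;
};

/* Allocation helper that, like i40e_allocate_dma_mem_d(), only receives the
 * hw handle and climbs back to the adapter to find the device context. */
static void *alloc_for_hw(struct hw_handle *hw, size_t size)
{
    struct adapter *ad = hw->back;

    printf("allocating %zu bytes for device %s\n", size, ad->pci_name);
    return malloc(size);
}

int main(void)
{
    struct adapter ad = { .pci_name = "0000:03:00.0" };

    ad.hw.back = &ad;              /* wire up the back-pointer once */
    free(alloc_for_hw(&ad.hw, 128));
    return 0;
}
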
172 * @pf: board private structure
183 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, in i40e_get_lump() argument
190 dev_info(&pf->pdev->dev, in i40e_get_lump()
259 * @pf: the pf structure to search for the vsi
262 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) in i40e_find_vsi_from_id() argument
266 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_find_vsi_from_id()
267 if (pf->vsi[i] && (pf->vsi[i]->id == id)) in i40e_find_vsi_from_id()
268 return pf->vsi[i]; in i40e_find_vsi_from_id()
275 * @pf: board private structure
279 void i40e_service_event_schedule(struct i40e_pf *pf) in i40e_service_event_schedule() argument
281 if ((!test_bit(__I40E_DOWN, pf->state) && in i40e_service_event_schedule()
282 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || in i40e_service_event_schedule()
283 test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_service_event_schedule()
284 queue_work(i40e_wq, &pf->service_task); in i40e_service_event_schedule()
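
i40e_service_event_schedule() only queues the service task when the function is usable, or unconditionally in recovery mode. A minimal sketch of that guard, with plain flag bits standing in for the driver's atomic pf->state bits (all names here are illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative state bits; the driver uses test_bit() on pf->state. */
#define STATE_DOWN                   (1u << 0)
#define STATE_RESET_RECOVERY_PENDING (1u << 1)
#define STATE_RECOVERY_MODE          (1u << 2)

/* Mirror of the guard above: queue the service task only when the function
 * is neither down nor mid-reset, or always while in recovery mode. */
static bool should_schedule(unsigned int state)
{
    bool usable = !(state & STATE_DOWN) &&
                  !(state & STATE_RESET_RECOVERY_PENDING);

    return usable || (state & STATE_RECOVERY_MODE);
}

int main(void)
{
    printf("%d\n", should_schedule(0));                                /* 1 */
    printf("%d\n", should_schedule(STATE_DOWN));                       /* 0 */
    printf("%d\n", should_schedule(STATE_DOWN | STATE_RECOVERY_MODE)); /* 1 */
    return 0;
}
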
300 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout() local
305 pf->tx_timeout_count++; in i40e_tx_timeout()
318 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) in i40e_tx_timeout()
319 pf->tx_timeout_recovery_level = 1; /* reset after some time */ in i40e_tx_timeout()
321 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) in i40e_tx_timeout()
325 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) in i40e_tx_timeout()
331 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_tx_timeout()
332 val = rd32(&pf->hw, in i40e_tx_timeout()
336 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); in i40e_tx_timeout()
344 pf->tx_timeout_last_recovery = jiffies; in i40e_tx_timeout()
346 pf->tx_timeout_recovery_level, txqueue); in i40e_tx_timeout()
348 switch (pf->tx_timeout_recovery_level) { in i40e_tx_timeout()
350 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
353 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
356 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_tx_timeout()
363 i40e_service_event_schedule(pf); in i40e_tx_timeout()
364 pf->tx_timeout_recovery_level++; in i40e_tx_timeout()
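
The i40e_tx_timeout() matches above show an escalation ladder: each consecutive timeout requests a heavier reset (PF, then core, then global), and the level is reset once the port has been quiet for a while. A small sketch of the ladder, assuming nothing beyond what the switch statement above shows:

#include <stdio.h>

enum reset_request { RESET_NONE, RESET_PF, RESET_CORE, RESET_GLOBAL };

/* Escalation ladder as in i40e_tx_timeout(): each consecutive timeout asks
 * for a heavier reset; the caller drops 'level' back to 1 after a quiet
 * period (20 * HZ in the driver). */
static enum reset_request escalate(unsigned int *level)
{
    enum reset_request req;

    switch (*level) {
    case 1:  req = RESET_PF;     break;
    case 2:  req = RESET_CORE;   break;
    case 3:  req = RESET_GLOBAL; break;
    default: req = RESET_NONE;   break;   /* give up past level 3 */
    }
    (*level)++;
    return req;
}

int main(void)
{
    unsigned int level = 1;

    for (int i = 0; i < 5; i++)
        printf("timeout %d -> reset %d\n", i, escalate(&level));
    return 0;
}
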
498 * i40e_pf_reset_stats - Reset all of the stats for the given PF
499 * @pf: the PF to be reset
501 void i40e_pf_reset_stats(struct i40e_pf *pf) in i40e_pf_reset_stats() argument
505 memset(&pf->stats, 0, sizeof(pf->stats)); in i40e_pf_reset_stats()
506 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); in i40e_pf_reset_stats()
507 pf->stat_offsets_loaded = false; in i40e_pf_reset_stats()
510 if (pf->veb[i]) { in i40e_pf_reset_stats()
511 memset(&pf->veb[i]->stats, 0, in i40e_pf_reset_stats()
512 sizeof(pf->veb[i]->stats)); in i40e_pf_reset_stats()
513 memset(&pf->veb[i]->stats_offsets, 0, in i40e_pf_reset_stats()
514 sizeof(pf->veb[i]->stats_offsets)); in i40e_pf_reset_stats()
515 memset(&pf->veb[i]->tc_stats, 0, in i40e_pf_reset_stats()
516 sizeof(pf->veb[i]->tc_stats)); in i40e_pf_reset_stats()
517 memset(&pf->veb[i]->tc_stats_offsets, 0, in i40e_pf_reset_stats()
518 sizeof(pf->veb[i]->tc_stats_offsets)); in i40e_pf_reset_stats()
519 pf->veb[i]->stat_offsets_loaded = false; in i40e_pf_reset_stats()
522 pf->hw_csum_rx_error = 0; in i40e_pf_reset_stats()
603 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats() local
604 struct i40e_hw *hw = &pf->hw; in i40e_update_eth_stats()
664 struct i40e_pf *pf = veb->pf; in i40e_update_veb_stats() local
665 struct i40e_hw *hw = &pf->hw; in i40e_update_veb_stats()
749 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats() local
766 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_update_vsi_stats()
859 /* pull in a couple PF stats if this is the main vsi */ in i40e_update_vsi_stats()
860 if (vsi == pf->vsi[pf->lan_vsi]) { in i40e_update_vsi_stats()
861 ns->rx_crc_errors = pf->stats.crc_errors; in i40e_update_vsi_stats()
862 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; in i40e_update_vsi_stats()
863 ns->rx_length_errors = pf->stats.rx_length_errors; in i40e_update_vsi_stats()
868 * i40e_update_pf_stats - Update the PF statistics counters.
869 * @pf: the PF to be updated
871 static void i40e_update_pf_stats(struct i40e_pf *pf) in i40e_update_pf_stats() argument
873 struct i40e_hw_port_stats *osd = &pf->stats_offsets; in i40e_update_pf_stats()
874 struct i40e_hw_port_stats *nsd = &pf->stats; in i40e_update_pf_stats()
875 struct i40e_hw *hw = &pf->hw; in i40e_update_pf_stats()
881 pf->stat_offsets_loaded, in i40e_update_pf_stats()
885 pf->stat_offsets_loaded, in i40e_update_pf_stats()
888 pf->stat_offsets_loaded, in i40e_update_pf_stats()
893 pf->stat_offsets_loaded, in i40e_update_pf_stats()
898 pf->stat_offsets_loaded, in i40e_update_pf_stats()
903 pf->stat_offsets_loaded, in i40e_update_pf_stats()
908 pf->stat_offsets_loaded, in i40e_update_pf_stats()
913 pf->stat_offsets_loaded, in i40e_update_pf_stats()
918 pf->stat_offsets_loaded, in i40e_update_pf_stats()
923 pf->stat_offsets_loaded, in i40e_update_pf_stats()
928 pf->stat_offsets_loaded, in i40e_update_pf_stats()
932 pf->stat_offsets_loaded, in i40e_update_pf_stats()
936 pf->stat_offsets_loaded, in i40e_update_pf_stats()
940 pf->stat_offsets_loaded, in i40e_update_pf_stats()
945 pf->stat_offsets_loaded, in i40e_update_pf_stats()
950 pf->stat_offsets_loaded, in i40e_update_pf_stats()
953 pf->stat_offsets_loaded, in i40e_update_pf_stats()
956 pf->stat_offsets_loaded, in i40e_update_pf_stats()
959 pf->stat_offsets_loaded, in i40e_update_pf_stats()
964 pf->stat_offsets_loaded, in i40e_update_pf_stats()
968 pf->stat_offsets_loaded, in i40e_update_pf_stats()
972 pf->stat_offsets_loaded, in i40e_update_pf_stats()
976 pf->stat_offsets_loaded, in i40e_update_pf_stats()
981 pf->stat_offsets_loaded, in i40e_update_pf_stats()
988 pf->stat_offsets_loaded, in i40e_update_pf_stats()
992 pf->stat_offsets_loaded, in i40e_update_pf_stats()
996 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1000 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1004 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1008 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1012 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1017 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1021 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1025 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1029 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1033 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1037 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1041 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1045 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1048 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1051 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1054 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1076 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1079 pf->stat_offsets_loaded, in i40e_update_pf_stats()
1082 if (pf->flags & I40E_FLAG_FD_SB_ENABLED && in i40e_update_pf_stats()
1083 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1088 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && in i40e_update_pf_stats()
1089 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_update_pf_stats()
1094 pf->stat_offsets_loaded = true; in i40e_update_pf_stats()
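
The repeated pf->stat_offsets_loaded argument in i40e_update_pf_stats() reflects how the hardware counters are read: they free-run, so the driver captures a baseline (offset) on the first read and reports the delta afterwards. A minimal 32-bit sketch of that offset-based update, in the spirit of the i40e stat helpers; names are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Offset-based counter update: remember the register value the first time
 * stats are read, then report the delta, handling a 32-bit wrap. */
static void stat_update32(uint32_t hw_reg, bool offset_loaded,
                          uint32_t *offset, uint64_t *stat)
{
    if (!offset_loaded)
        *offset = hw_reg;

    if (hw_reg >= *offset)
        *stat = hw_reg - *offset;
    else
        *stat = (uint64_t)hw_reg + ((1ULL << 32) - *offset);
}

int main(void)
{
    uint32_t offset = 0;
    uint64_t rx_bytes = 0;

    stat_update32(1000, false, &offset, &rx_bytes);  /* first read: baseline */
    stat_update32(1500, true,  &offset, &rx_bytes);
    printf("rx_bytes since load: %llu\n", (unsigned long long)rx_bytes); /* 500 */
    return 0;
}
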
1105 struct i40e_pf *pf = vsi->back; in i40e_update_stats() local
1107 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_update_stats()
1108 i40e_update_pf_stats(pf); in i40e_update_stats()
1329 * @vsi: the PF Main VSI - inappropriate for any other VSI
1338 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter() local
1340 /* Only appropriate for the PF main VSI */ in i40e_rm_default_mac_filter()
1349 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1357 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); in i40e_rm_default_mac_filter()
1556 struct i40e_pf *pf = vsi->back; in i40e_set_mac() local
1557 struct i40e_hw *hw = &pf->hw; in i40e_set_mac()
1569 if (test_bit(__I40E_DOWN, pf->state) || in i40e_set_mac()
1570 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_set_mac()
1605 i40e_service_event_schedule(pf); in i40e_set_mac()
1619 struct i40e_pf *pf = vsi->back; in i40e_config_rss_aq() local
1620 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_aq()
1628 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1640 dev_info(&pf->pdev->dev, in i40e_config_rss_aq()
1656 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_rss() local
1661 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) in i40e_vsi_config_rss()
1664 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_vsi_config_rss()
1678 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_vsi_config_rss()
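
i40e_vsi_config_rss() fills the RSS lookup table by spreading LUT slots round-robin across the active queues. A short sketch of that fill, in the manner of i40e_fill_rss_lut(); table and queue counts here are made up for the example:

#include <stdio.h>

/* Round-robin fill: each LUT slot points at one of rss_size queues,
 * repeating until the table is full. */
static void fill_rss_lut(unsigned char *lut, int table_size, int rss_size)
{
    for (int i = 0; i < table_size; i++)
        lut[i] = i % rss_size;
}

int main(void)
{
    unsigned char lut[16];

    fill_rss_lut(lut, 16, 6);   /* 16-entry table spread over 6 queues */
    for (int i = 0; i < 16; i++)
        printf("%d ", lut[i]);
    printf("\n");
    return 0;
}
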
1789 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_queue_map() local
1811 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); in i40e_vsi_setup_queue_map()
1816 i40e_pf_get_max_q_per_tc(pf)); in i40e_vsi_setup_queue_map()
1823 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
1824 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix); in i40e_vsi_setup_queue_map()
1835 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | in i40e_vsi_setup_queue_map()
1838 qcount = min_t(int, pf->alloc_rss_size, in i40e_vsi_setup_queue_map()
1887 else if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_setup_queue_map()
1888 vsi->num_queue_pairs = pf->num_lan_msix; in i40e_vsi_setup_queue_map()
2208 * @pf: board private structure
2211 * There are different ways of setting promiscuous mode on a PF depending on
2215 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) in i40e_set_promiscuous() argument
2217 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_set_promiscuous()
2218 struct i40e_hw *hw = &pf->hw; in i40e_set_promiscuous()
2222 pf->lan_veb != I40E_NO_VEB && in i40e_set_promiscuous()
2223 !(pf->flags & I40E_FLAG_MFP_ENABLED)) { in i40e_set_promiscuous()
2238 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2250 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2260 dev_info(&pf->pdev->dev, in i40e_set_promiscuous()
2268 pf->cur_promisc = promisc; in i40e_set_promiscuous()
2290 char vsi_name[16] = "PF"; in i40e_sync_vsi_filters()
2295 struct i40e_pf *pf; in i40e_sync_vsi_filters() local
2309 pf = vsi->back; in i40e_sync_vsi_filters()
2522 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2530 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { in i40e_sync_vsi_filters()
2555 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2561 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", in i40e_sync_vsi_filters()
2572 aq_ret = i40e_set_promiscuous(pf, cur_promisc); in i40e_sync_vsi_filters()
2576 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2607 * @pf: board private structure
2609 static void i40e_sync_filters_subtask(struct i40e_pf *pf) in i40e_sync_filters_subtask() argument
2613 if (!pf) in i40e_sync_filters_subtask()
2615 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) in i40e_sync_filters_subtask()
2617 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { in i40e_sync_filters_subtask()
2618 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_sync_filters_subtask()
2622 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_sync_filters_subtask()
2623 if (pf->vsi[v] && in i40e_sync_filters_subtask()
2624 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { in i40e_sync_filters_subtask()
2625 int ret = i40e_sync_vsi_filters(pf->vsi[v]); in i40e_sync_filters_subtask()
2630 pf->state); in i40e_sync_filters_subtask()
2635 clear_bit(__I40E_VF_DISABLE, pf->state); in i40e_sync_filters_subtask()
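
i40e_sync_filters_subtask() claims a busy bit (__I40E_VF_DISABLE); if someone else already holds it, the pending bit is re-armed so the next service-task pass retries. A simplified, single-threaded sketch of that "try, or re-arm and retry later" shape, using plain bools in place of the driver's atomic state bits:

#include <stdio.h>
#include <stdbool.h>

static bool busy;          /* __I40E_VF_DISABLE stand-in  */
static bool sync_pending;  /* __I40E_MACVLAN_SYNC_PENDING stand-in */

static void sync_subtask(void)
{
    if (!sync_pending)
        return;
    sync_pending = false;

    if (busy) {                 /* someone else is reconfiguring VFs */
        sync_pending = true;    /* re-arm and try again next pass */
        return;
    }
    busy = true;
    printf("syncing MAC/VLAN filters\n");
    busy = false;
}

int main(void)
{
    sync_pending = true;
    busy = true;
    sync_subtask();             /* deferred: busy was held */
    busy = false;
    sync_subtask();             /* runs now */
    return 0;
}
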
2661 struct i40e_pf *pf = vsi->back; in i40e_change_mtu() local
2675 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_change_mtu()
2676 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_change_mtu()
2689 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl() local
2693 return i40e_ptp_get_ts_config(pf, ifr); in i40e_ioctl()
2695 return i40e_ptp_set_ts_config(pf, ifr); in i40e_ioctl()
3506 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore() local
3509 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_filter_restore()
3513 pf->fd_tcp4_filter_cnt = 0; in i40e_fdir_filter_restore()
3514 pf->fd_udp4_filter_cnt = 0; in i40e_fdir_filter_restore()
3515 pf->fd_sctp4_filter_cnt = 0; in i40e_fdir_filter_restore()
3516 pf->fd_ip4_filter_cnt = 0; in i40e_fdir_filter_restore()
3519 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_restore()
3549 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix() local
3550 struct i40e_hw *hw = &pf->hw; in i40e_vsi_configure_msix()
3629 * @pf: pointer to private device data structure
3631 static void i40e_enable_misc_int_causes(struct i40e_pf *pf) in i40e_enable_misc_int_causes() argument
3633 struct i40e_hw *hw = &pf->hw; in i40e_enable_misc_int_causes()
3649 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_enable_misc_int_causes()
3652 if (pf->flags & I40E_FLAG_PTP) in i40e_enable_misc_int_causes()
3673 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy() local
3674 struct i40e_hw *hw = &pf->hw; in i40e_configure_msi_and_legacy()
3687 i40e_enable_misc_int_causes(pf); in i40e_configure_msi_and_legacy()
3719 * @pf: board private structure
3721 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) in i40e_irq_dynamic_disable_icr0() argument
3723 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_disable_icr0()
3732 * @pf: board private structure
3734 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) in i40e_irq_dynamic_enable_icr0() argument
3736 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_enable_icr0()
3801 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix() local
3812 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
3834 dev_info(&pf->pdev->dev, in i40e_vsi_request_irq_msix()
3859 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
3873 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq() local
3874 struct i40e_hw *hw = &pf->hw; in i40e_vsi_disable_irq()
3896 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_disable_irq()
3903 synchronize_irq(pf->msix_entries[i + base].vector); in i40e_vsi_disable_irq()
3909 synchronize_irq(pf->pdev->irq); in i40e_vsi_disable_irq()
3919 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq() local
3922 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_enable_irq()
3926 i40e_irq_dynamic_enable_icr0(pf); in i40e_vsi_enable_irq()
3929 i40e_flush(&pf->hw); in i40e_vsi_enable_irq()
3935 * @pf: board private structure
3937 static void i40e_free_misc_vector(struct i40e_pf *pf) in i40e_free_misc_vector() argument
3940 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); in i40e_free_misc_vector()
3941 i40e_flush(&pf->hw); in i40e_free_misc_vector()
3943 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { in i40e_free_misc_vector()
3944 synchronize_irq(pf->msix_entries[0].vector); in i40e_free_misc_vector()
3945 free_irq(pf->msix_entries[0].vector, pf); in i40e_free_misc_vector()
3946 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_free_misc_vector()
3961 struct i40e_pf *pf = (struct i40e_pf *)data; in i40e_intr() local
3962 struct i40e_hw *hw = &pf->hw; in i40e_intr()
3977 pf->sw_int_count++; in i40e_intr()
3979 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_intr()
3982 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); in i40e_intr()
3983 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_intr()
3988 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_intr()
3997 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_intr()
4003 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_intr()
4004 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); in i40e_intr()
4009 set_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_intr()
4014 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { in i40e_intr()
4021 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); in i40e_intr()
4026 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_intr()
4027 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4033 pf->corer_count++; in i40e_intr()
4035 pf->globr_count++; in i40e_intr()
4037 pf->empr_count++; in i40e_intr()
4038 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4044 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); in i40e_intr()
4045 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", in i40e_intr()
4055 i40e_ptp_tx_hwtstamp(pf); in i40e_intr()
4065 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", in i40e_intr()
4070 dev_info(&pf->pdev->dev, "device will be reset\n"); in i40e_intr()
4071 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_intr()
4072 i40e_service_event_schedule(pf); in i40e_intr()
4081 if (!test_bit(__I40E_DOWN, pf->state) || in i40e_intr()
4082 test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_intr()
4083 i40e_service_event_schedule(pf); in i40e_intr()
4084 i40e_irq_dynamic_enable_icr0(pf); in i40e_intr()
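
The i40e_intr() matches show the misc interrupt handler decoding cause bits into pf->state flags and deferring the real work to the service task. A tiny sketch of that decode step; the cause and work bit names below are invented for illustration and do not correspond to real register fields:

#include <stdio.h>

/* Illustrative cause bits, loosely modelled on a cause register. */
#define CAUSE_ADMINQ  (1u << 0)
#define CAUSE_MDD     (1u << 1)
#define CAUSE_VFLR    (1u << 2)

/* Pending-work bits that a service task would consume later. */
#define WORK_ADMINQ   (1u << 0)
#define WORK_MDD      (1u << 1)
#define WORK_VFLR     (1u << 2)

/* Decode a one-shot cause value into deferred-work flags, the way i40e_intr()
 * turns ICR0 bits into pf->state bits for the service task. */
static unsigned int decode_causes(unsigned int icr0)
{
    unsigned int work = 0;

    if (icr0 & CAUSE_ADMINQ)
        work |= WORK_ADMINQ;
    if (icr0 & CAUSE_MDD)
        work |= WORK_MDD;
    if (icr0 & CAUSE_VFLR)
        work |= WORK_VFLR;
    return work;
}

int main(void)
{
    unsigned int work = decode_causes(CAUSE_ADMINQ | CAUSE_VFLR);

    printf("pending work mask: 0x%x\n", work);   /* 0x5 */
    return 0;
}
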
4279 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq() local
4282 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_request_irq()
4284 else if (pf->flags & I40E_FLAG_MSI_ENABLED) in i40e_vsi_request_irq()
4285 err = request_irq(pf->pdev->irq, i40e_intr, 0, in i40e_vsi_request_irq()
4286 pf->int_name, pf); in i40e_vsi_request_irq()
4288 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, in i40e_vsi_request_irq()
4289 pf->int_name, pf); in i40e_vsi_request_irq()
4292 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); in i40e_vsi_request_irq()
4309 struct i40e_pf *pf = vsi->back; in i40e_netpoll() local
4316 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_netpoll()
4320 i40e_intr(pf->pdev->irq, netdev); in i40e_netpoll()
4328 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4329 * @pf: the PF being configured
4330 * @pf_q: the PF queue
4333 * This routine will wait for the given Tx queue of the PF to reach the
4338 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) in i40e_pf_txq_wait() argument
4344 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); in i40e_pf_txq_wait()
4358 * @pf: the PF structure
4359 * @pf_q: the PF queue to configure
4366 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_tx_q() argument
4368 struct i40e_hw *hw = &pf->hw; in i40e_control_tx_q()
4373 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); in i40e_control_tx_q()
4403 * @pf: the PF structure
4404 * @pf_q: the PF queue to configure
4408 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, in i40e_control_wait_tx_q() argument
4413 i40e_control_tx_q(pf, pf_q, enable); in i40e_control_wait_tx_q()
4416 ret = i40e_pf_txq_wait(pf, pf_q, enable); in i40e_control_wait_tx_q()
4418 dev_info(&pf->pdev->dev, in i40e_control_wait_tx_q()
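
i40e_pf_txq_wait() and i40e_control_wait_tx_q() implement a bounded polling wait: re-read the queue-enable status until it matches the requested state or the retry budget runs out. A userspace approximation with a simulated register (the bit position and retry count are illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>
#include <errno.h>

#define QUEUE_ENABLED  (1u << 2)     /* stand-in for the queue-enable status bit */
#define WAIT_RETRIES   10

/* Simulated queue-enable register; the driver rd32()s the real one. */
static unsigned int qena_reg;

/* Poll until the status matches the requested state, or time out. */
static int wait_queue_state(bool enable)
{
    for (int i = 0; i < WAIT_RETRIES; i++) {
        bool is_on = qena_reg & QUEUE_ENABLED;

        if (is_on == enable)
            return 0;
        usleep(10000);               /* the driver uses usleep_range() */
    }
    return -ETIMEDOUT;
}

int main(void)
{
    qena_reg = QUEUE_ENABLED;
    printf("wait enable:  %d\n", wait_queue_state(true));   /* 0 */
    printf("wait disable: %d\n", wait_queue_state(false));  /* times out */
    return 0;
}
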
4434 struct i40e_pf *pf = vsi->back; in i40e_vsi_control_tx() local
4439 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_control_tx()
4448 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_control_tx()
4458 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4459 * @pf: the PF being configured
4460 * @pf_q: the PF queue
4463 * This routine will wait for the given Rx queue of the PF to reach the
4468 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) in i40e_pf_rxq_wait() argument
4474 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); in i40e_pf_rxq_wait()
4488 * @pf: the PF structure
4489 * @pf_q: the PF queue to configure
4496 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_rx_q() argument
4498 struct i40e_hw *hw = &pf->hw; in i40e_control_rx_q()
4525 * @pf: the PF structure
4533 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) in i40e_control_wait_rx_q() argument
4537 i40e_control_rx_q(pf, pf_q, enable); in i40e_control_wait_rx_q()
4540 ret = i40e_pf_rxq_wait(pf, pf_q, enable); in i40e_control_wait_rx_q()
4554 struct i40e_pf *pf = vsi->back; in i40e_vsi_control_rx() local
4559 ret = i40e_control_wait_rx_q(pf, pf_q, enable); in i40e_vsi_control_rx()
4561 dev_info(&pf->pdev->dev, in i40e_vsi_control_rx()
4624 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait() local
4629 i40e_control_tx_q(pf, pf_q, false); in i40e_vsi_stop_rings_no_wait()
4630 i40e_control_rx_q(pf, pf_q, false); in i40e_vsi_stop_rings_no_wait()
4640 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq() local
4641 struct i40e_hw *hw = &pf->hw; in i40e_vsi_free_irq()
4646 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_vsi_free_irq()
4659 irq_num = pf->msix_entries[vector].vector; in i40e_vsi_free_irq()
4720 free_irq(pf->pdev->irq, pf); in i40e_vsi_free_irq()
4804 * @pf: board private structure
4806 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) in i40e_reset_interrupt_capability() argument
4809 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_reset_interrupt_capability()
4810 pci_disable_msix(pf->pdev); in i40e_reset_interrupt_capability()
4811 kfree(pf->msix_entries); in i40e_reset_interrupt_capability()
4812 pf->msix_entries = NULL; in i40e_reset_interrupt_capability()
4813 kfree(pf->irq_pile); in i40e_reset_interrupt_capability()
4814 pf->irq_pile = NULL; in i40e_reset_interrupt_capability()
4815 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { in i40e_reset_interrupt_capability()
4816 pci_disable_msi(pf->pdev); in i40e_reset_interrupt_capability()
4818 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_reset_interrupt_capability()
4823 * @pf: board private structure
4828 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) in i40e_clear_interrupt_scheme() argument
4832 i40e_free_misc_vector(pf); in i40e_clear_interrupt_scheme()
4834 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, in i40e_clear_interrupt_scheme()
4837 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); in i40e_clear_interrupt_scheme()
4838 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_clear_interrupt_scheme()
4839 if (pf->vsi[i]) in i40e_clear_interrupt_scheme()
4840 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_clear_interrupt_scheme()
4841 i40e_reset_interrupt_capability(pf); in i40e_clear_interrupt_scheme()
4888 struct i40e_pf *pf = vsi->back; in i40e_vsi_close() local
4895 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_vsi_close()
4896 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_vsi_close()
4897 set_bit(__I40E_CLIENT_RESET, pf->state); in i40e_vsi_close()
4932 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4933 * @pf: the PF
4935 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) in i40e_pf_quiesce_all_vsi() argument
4939 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_quiesce_all_vsi()
4940 if (pf->vsi[v]) in i40e_pf_quiesce_all_vsi()
4941 i40e_quiesce_vsi(pf->vsi[v]); in i40e_pf_quiesce_all_vsi()
4946 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4947 * @pf: the PF
4949 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) in i40e_pf_unquiesce_all_vsi() argument
4953 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_unquiesce_all_vsi()
4954 if (pf->vsi[v]) in i40e_pf_unquiesce_all_vsi()
4955 i40e_unquiesce_vsi(pf->vsi[v]); in i40e_pf_unquiesce_all_vsi()
4967 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled() local
4973 ret = i40e_pf_txq_wait(pf, pf_q, false); in i40e_vsi_wait_queues_disabled()
4975 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
4985 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
4988 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
4995 ret = i40e_pf_rxq_wait(pf, pf_q, false); in i40e_vsi_wait_queues_disabled()
4997 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5009 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5010 * @pf: the PF
5013 * VSIs that are managed by this PF.
5015 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) in i40e_pf_wait_queues_disabled() argument
5019 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { in i40e_pf_wait_queues_disabled()
5020 if (pf->vsi[v]) { in i40e_pf_wait_queues_disabled()
5021 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); in i40e_pf_wait_queues_disabled()
5034 * @pf: pointer to PF
5036 * Get TC map for ISCSI PF type that will include iSCSI TC
5039 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) in i40e_get_iscsi_tc_map() argument
5042 struct i40e_hw *hw = &pf->hw; in i40e_get_iscsi_tc_map()
5124 * @pf: PF being queried
5129 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) in i40e_mqprio_get_enabled_tc() argument
5131 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_mqprio_get_enabled_tc()
5141 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5142 * @pf: PF being queried
5144 * Return number of traffic classes enabled for the given PF
5146 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) in i40e_pf_get_num_tc() argument
5148 struct i40e_hw *hw = &pf->hw; in i40e_pf_get_num_tc()
5153 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_pf_get_num_tc()
5154 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5157 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_num_tc()
5161 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_num_tc()
5164 /* MFP mode return count of enabled TCs for this PF */ in i40e_pf_get_num_tc()
5165 if (pf->hw.func_caps.iscsi) in i40e_pf_get_num_tc()
5166 enabled_tc = i40e_get_iscsi_tc_map(pf); in i40e_pf_get_num_tc()
5179 * @pf: PF being queried
5181 * Return a bitmap for enabled traffic classes for this PF.
5183 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) in i40e_pf_get_tc_map() argument
5185 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_pf_get_tc_map()
5186 return i40e_mqprio_get_enabled_tc(pf); in i40e_pf_get_tc_map()
5188 /* If neither MQPRIO nor DCB is enabled for this PF then just return in i40e_pf_get_tc_map()
5191 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) in i40e_pf_get_tc_map()
5194 /* SFP mode we want PF to be enabled for all TCs */ in i40e_pf_get_tc_map()
5195 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_pf_get_tc_map()
5196 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); in i40e_pf_get_tc_map()
5198 /* MFP enabled and iSCSI PF type */ in i40e_pf_get_tc_map()
5199 if (pf->hw.func_caps.iscsi) in i40e_pf_get_tc_map()
5200 return i40e_get_iscsi_tc_map(pf); in i40e_pf_get_tc_map()
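
i40e_pf_get_tc_map() returns a bitmap of enabled traffic classes and i40e_pf_get_num_tc() reduces it to a count, falling back to TC0 only. A small sketch of that bitmap-to-count step, assuming nothing beyond an 8-bit TC map:

#include <stdio.h>

/* Count enabled traffic classes from a TC bitmap; at least TC0 is assumed. */
static int num_tc_from_map(unsigned char enabled_tc)
{
    int num = 0;

    for (int i = 0; i < 8; i++)
        if (enabled_tc & (1u << i))
            num++;
    return num ? num : 1;           /* default: only TC0 */
}

int main(void)
{
    printf("%d\n", num_tc_from_map(0x01));   /* 1: only TC0 */
    printf("%d\n", num_tc_from_map(0x0F));   /* 4 TCs enabled */
    return 0;
}
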
5215 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info() local
5216 struct i40e_hw *hw = &pf->hw; in i40e_vsi_get_bw_info()
5224 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5225 "couldn't get PF vsi bw config, err %s aq_err %s\n", in i40e_vsi_get_bw_info()
5226 i40e_stat_str(&pf->hw, ret), in i40e_vsi_get_bw_info()
5227 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5235 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5236 "couldn't get PF vsi ets bw config, err %s aq_err %s\n", in i40e_vsi_get_bw_info()
5237 i40e_stat_str(&pf->hw, ret), in i40e_vsi_get_bw_info()
5238 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5243 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5277 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc() local
5282 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_vsi_configure_bw_alloc()
5284 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_configure_bw_alloc()
5287 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5296 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5298 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5300 pf->hw.aq.asq_last_status); in i40e_vsi_configure_bw_alloc()
5319 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc() local
5320 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_netdev_tc()
5353 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_vsi_config_netdev_tc()
5401 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc() local
5402 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_tc()
5422 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5428 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5441 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5449 dev_err(&pf->pdev->dev, in i40e_vsi_config_tc()
5495 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5508 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5528 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed() local
5530 switch (pf->hw.phy.link_info.link_speed) { in i40e_get_link_speed()
5556 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit() local
5563 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5569 dev_warn(&pf->pdev->dev, in i40e_set_bw_limit()
5577 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, in i40e_set_bw_limit()
5580 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5582 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), in i40e_set_bw_limit()
5583 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_set_bw_limit()
5598 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels() local
5642 &pf->cloud_filter_list, cloud_node) { in i40e_remove_queue_channels()
5654 last_aq_status = pf->hw.aq.asq_last_status; in i40e_remove_queue_channels()
5656 dev_info(&pf->pdev->dev, in i40e_remove_queue_channels()
5658 i40e_stat_str(&pf->hw, ret), in i40e_remove_queue_channels()
5659 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_remove_queue_channels()
5717 * @pf: ptr to PF device
5726 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues, in i40e_validate_num_queues() argument
5737 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
5743 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
5758 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
5778 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss() local
5780 struct i40e_hw *hw = &pf->hw; in i40e_vsi_reconfig_rss()
5797 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
5809 dev_info(&pf->pdev->dev, in i40e_vsi_reconfig_rss()
5828 * @pf: ptr to PF device
5834 static void i40e_channel_setup_queue_map(struct i40e_pf *pf, in i40e_channel_setup_queue_map() argument
5845 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); in i40e_channel_setup_queue_map()
5867 * @pf: ptr to PF device
5873 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, in i40e_add_channel() argument
5876 struct i40e_hw *hw = &pf->hw; in i40e_add_channel()
5882 dev_info(&pf->pdev->dev, in i40e_add_channel()
5895 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { in i40e_add_channel()
5903 i40e_channel_setup_queue_map(pf, &ctxt, ch); in i40e_add_channel()
5908 dev_info(&pf->pdev->dev, in i40e_add_channel()
5910 i40e_stat_str(&pf->hw, ret), in i40e_add_channel()
5911 i40e_aq_str(&pf->hw, in i40e_add_channel()
5912 pf->hw.aq.asq_last_status)); in i40e_add_channel()
5965 * @pf: ptr to PF device
5972 static int i40e_channel_config_tx_ring(struct i40e_pf *pf, in i40e_channel_config_tx_ring() argument
6017 * @pf: ptr to PF device
6026 static inline int i40e_setup_hw_channel(struct i40e_pf *pf, in i40e_setup_hw_channel() argument
6038 ret = i40e_add_channel(pf, uplink_seid, ch); in i40e_setup_hw_channel()
6040 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6050 ret = i40e_channel_config_tx_ring(pf, vsi, ch); in i40e_setup_hw_channel()
6052 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6060 dev_dbg(&pf->pdev->dev, in i40e_setup_hw_channel()
6061 …"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base… in i40e_setup_hw_channel()
6070 * @pf: ptr to PF device
6077 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, in i40e_setup_channel() argument
6087 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", in i40e_setup_channel()
6093 seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_setup_channel()
6096 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); in i40e_setup_channel()
6098 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); in i40e_setup_channel()
6107 * @vsi: ptr to VSI which has PF backing
6115 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode() local
6116 struct i40e_hw *hw = &pf->hw; in i40e_validate_and_set_switch_mode()
6119 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); in i40e_validate_and_set_switch_mode()
6132 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6149 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, in i40e_validate_and_set_switch_mode()
6150 pf->last_sw_conf_valid_flags, in i40e_validate_and_set_switch_mode()
6153 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6173 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel() local
6181 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", in i40e_create_queue_channel()
6187 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6190 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", in i40e_create_queue_channel()
6198 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || in i40e_create_queue_channel()
6201 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6207 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_create_queue_channel()
6208 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_create_queue_channel()
6211 if (pf->flags & I40E_FLAG_TC_MQPRIO) in i40e_create_queue_channel()
6212 i40e_do_reset(pf, I40E_PF_RESET_FLAG, in i40e_create_queue_channel()
6215 i40e_do_reset_safe(pf, in i40e_create_queue_channel()
6228 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6238 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6245 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_create_queue_channel()
6246 dev_info(&pf->pdev->dev, "Failed to setup channel\n"); in i40e_create_queue_channel()
6250 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6262 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6342 struct i40e_pf *pf = veb->pf; in i40e_veb_config_tc() local
6359 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, in i40e_veb_config_tc()
6362 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6364 i40e_stat_str(&pf->hw, ret), in i40e_veb_config_tc()
6365 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6372 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6374 i40e_stat_str(&pf->hw, ret), in i40e_veb_config_tc()
6375 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6385 * @pf: PF struct
6387 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6391 static void i40e_dcb_reconfigure(struct i40e_pf *pf) in i40e_dcb_reconfigure() argument
6397 /* Enable the TCs available on PF to all VEBs */ in i40e_dcb_reconfigure()
6398 tc_map = i40e_pf_get_tc_map(pf); in i40e_dcb_reconfigure()
6400 if (!pf->veb[v]) in i40e_dcb_reconfigure()
6402 ret = i40e_veb_config_tc(pf->veb[v], tc_map); in i40e_dcb_reconfigure()
6404 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6406 pf->veb[v]->seid); in i40e_dcb_reconfigure()
6412 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_dcb_reconfigure()
6413 if (!pf->vsi[v]) in i40e_dcb_reconfigure()
6419 if (v == pf->lan_vsi) in i40e_dcb_reconfigure()
6420 tc_map = i40e_pf_get_tc_map(pf); in i40e_dcb_reconfigure()
6424 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); in i40e_dcb_reconfigure()
6426 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6428 pf->vsi[v]->seid); in i40e_dcb_reconfigure()
6432 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); in i40e_dcb_reconfigure()
6433 if (pf->vsi[v]->netdev) in i40e_dcb_reconfigure()
6434 i40e_dcbnl_set_all(pf->vsi[v]); in i40e_dcb_reconfigure()
6441 * @pf: PF struct
6443 * Resume a port's Tx and issue a PF reset in case of failure to
6446 static int i40e_resume_port_tx(struct i40e_pf *pf) in i40e_resume_port_tx() argument
6448 struct i40e_hw *hw = &pf->hw; in i40e_resume_port_tx()
6453 dev_info(&pf->pdev->dev, in i40e_resume_port_tx()
6455 i40e_stat_str(&pf->hw, ret), in i40e_resume_port_tx()
6456 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_resume_port_tx()
6457 /* Schedule PF reset to recover */ in i40e_resume_port_tx()
6458 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_resume_port_tx()
6459 i40e_service_event_schedule(pf); in i40e_resume_port_tx()
6467 * @pf: PF being configured
6472 static int i40e_init_pf_dcb(struct i40e_pf *pf) in i40e_init_pf_dcb() argument
6474 struct i40e_hw *hw = &pf->hw; in i40e_init_pf_dcb()
6480 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) || in i40e_init_pf_dcb()
6481 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) { in i40e_init_pf_dcb()
6482 dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n"); in i40e_init_pf_dcb()
6492 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
6493 "DCBX offload is not supported or is disabled for this PF.\n"); in i40e_init_pf_dcb()
6496 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | in i40e_init_pf_dcb()
6499 pf->flags |= I40E_FLAG_DCB_CAPABLE; in i40e_init_pf_dcb()
6504 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
6506 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_init_pf_dcb()
6507 dev_dbg(&pf->pdev->dev, in i40e_init_pf_dcb()
6508 "DCBX offload is supported for this PF.\n"); in i40e_init_pf_dcb()
6510 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_init_pf_dcb()
6511 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); in i40e_init_pf_dcb()
6512 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP; in i40e_init_pf_dcb()
6514 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
6516 i40e_stat_str(&pf->hw, err), in i40e_init_pf_dcb()
6517 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_init_pf_dcb()
6533 struct i40e_pf *pf = vsi->back; in i40e_print_link_message() local
6541 new_speed = pf->hw.phy.link_info.link_speed; in i40e_print_link_message()
6557 if (pf->hw.func_caps.npar_enable && in i40e_print_link_message()
6558 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || in i40e_print_link_message()
6559 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) in i40e_print_link_message()
6563 switch (pf->hw.phy.link_info.link_speed) { in i40e_print_link_message()
6592 switch (pf->hw.fc.current_mode) { in i40e_print_link_message()
6607 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { in i40e_print_link_message()
6612 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
6615 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
6618 else if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
6636 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { in i40e_print_link_message()
6641 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
6644 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
6648 if (pf->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
6669 struct i40e_pf *pf = vsi->back; in i40e_up_complete() local
6672 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_up_complete()
6686 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && in i40e_up_complete()
6696 pf->fd_add_err = 0; in i40e_up_complete()
6697 pf->fd_atr_cnt = 0; in i40e_up_complete()
6704 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_up_complete()
6705 i40e_service_event_schedule(pf); in i40e_up_complete()
6719 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked() local
6721 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_vsi_reinit_locked()
6726 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_vsi_reinit_locked()
6731 * @pf: board private structure
6734 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) in i40e_force_link_state() argument
6739 struct i40e_hw *hw = &pf->hw; in i40e_force_link_state()
6753 dev_err(&pf->pdev->dev, in i40e_force_link_state()
6765 dev_err(&pf->pdev->dev, in i40e_force_link_state()
6776 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) in i40e_force_link_state()
6792 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { in i40e_force_link_state()
6810 dev_err(&pf->pdev->dev, in i40e_force_link_state()
6812 i40e_stat_str(&pf->hw, err), in i40e_force_link_state()
6813 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_force_link_state()
7096 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up() local
7097 struct i40e_hw *hw = &pf->hw; in i40e_fwd_ring_up()
7148 dev_info(&pf->pdev->dev, in i40e_fwd_ring_up()
7168 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans() local
7169 struct i40e_hw *hw = &pf->hw; in i40e_setup_macvlans()
7205 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7221 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7241 if (!i40e_setup_channel(pf, vsi, ch)) { in i40e_setup_macvlans()
7255 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); in i40e_setup_macvlans()
7271 struct i40e_pf *pf = vsi->back; in i40e_fwd_add() local
7275 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_fwd_add()
7279 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { in i40e_fwd_add()
7283 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { in i40e_fwd_add()
7295 /* reserve bit 0 for the pf device */ in i40e_fwd_add()
7302 vectors = pf->num_lan_msix; in i40e_fwd_add()
7304 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ in i40e_fwd_add()
7308 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ in i40e_fwd_add()
7312 /* allocate 1 Q per macvlan and 16 Qs to the PF*/ in i40e_fwd_add()
7316 /* allocate 1 Q per macvlan and 8 Qs to the PF */ in i40e_fwd_add()
7320 /* allocate 1 Q per macvlan and 1 Q to the PF */ in i40e_fwd_add()
7379 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans() local
7380 struct i40e_hw *hw = &pf->hw; in i40e_del_all_macvlans()
7416 struct i40e_pf *pf = vsi->back; in i40e_fwd_del() local
7417 struct i40e_hw *hw = &pf->hw; in i40e_fwd_del()
7437 dev_info(&pf->pdev->dev, in i40e_fwd_del()
7457 struct i40e_pf *pf = vsi->back; in i40e_setup_tc() local
7470 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
7476 if (pf->flags & I40E_FLAG_MFP_ENABLED) { in i40e_setup_tc()
7483 pf->flags &= ~I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
7486 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_setup_tc()
7493 if (num_tc > i40e_pf_get_num_tc(pf)) { in i40e_setup_tc()
7500 if (pf->flags & I40E_FLAG_DCB_ENABLED) { in i40e_setup_tc()
7505 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_setup_tc()
7512 pf->flags |= I40E_FLAG_TC_MQPRIO; in i40e_setup_tc()
7513 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_setup_tc()
7532 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO)) in i40e_setup_tc()
7548 if (pf->flags & I40E_FLAG_TC_MQPRIO) { in i40e_setup_tc()
7646 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter() local
7683 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
7686 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
7689 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter()
7692 pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter()
7694 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter()
7714 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf() local
7769 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
7782 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
7788 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
7791 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
7796 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
7798 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter_big_buf()
7800 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
7821 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower() local
7833 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", in i40e_parse_cls_flower()
7873 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", in i40e_parse_cls_flower()
7883 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", in i40e_parse_cls_flower()
7901 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", in i40e_parse_cls_flower()
7925 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", in i40e_parse_cls_flower()
7935 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", in i40e_parse_cls_flower()
7942 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); in i40e_parse_cls_flower()
7959 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
7981 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", in i40e_parse_cls_flower()
7991 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", in i40e_parse_cls_flower()
8005 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8060 struct i40e_pf *pf = vsi->back; in i40e_configure_clsflower() local
8068 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_configure_clsflower()
8069 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_configure_clsflower()
8072 if (pf->fdir_pf_active_filters || in i40e_configure_clsflower()
8073 (!hlist_empty(&pf->fdir_filter_list))) { in i40e_configure_clsflower()
8107 dev_err(&pf->pdev->dev, in i40e_configure_clsflower()
8109 i40e_stat_str(&pf->hw, err)); in i40e_configure_clsflower()
8116 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); in i40e_configure_clsflower()
8118 pf->num_cloud_filters++; in i40e_configure_clsflower()
8155 struct i40e_pf *pf = vsi->back; in i40e_delete_clsflower() local
8172 dev_err(&pf->pdev->dev, in i40e_delete_clsflower()
8174 i40e_stat_str(&pf->hw, err)); in i40e_delete_clsflower()
8175 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); in i40e_delete_clsflower()
8178 pf->num_cloud_filters--; in i40e_delete_clsflower()
8179 if (!pf->num_cloud_filters) in i40e_delete_clsflower()
8180 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_delete_clsflower()
8181 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_delete_clsflower()
8182 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_delete_clsflower()
8183 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_delete_clsflower()
8184 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_delete_clsflower()
8264 struct i40e_pf *pf = vsi->back; in i40e_open() local
8268 if (test_bit(__I40E_TESTING, pf->state) || in i40e_open()
8269 test_bit(__I40E_BAD_EEPROM, pf->state)) in i40e_open()
8274 if (i40e_force_link_state(pf, true)) in i40e_open()
8282 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
8284 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | in i40e_open()
8287 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); in i40e_open()
8306 struct i40e_pf *pf = vsi->back; in i40e_vsi_open() local
8324 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); in i40e_vsi_open()
8342 dev_driver_string(&pf->pdev->dev), in i40e_vsi_open()
8343 dev_name(&pf->pdev->dev)); in i40e_vsi_open()
8365 if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_open()
8366 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_vsi_open()
8373 * @pf: Pointer to PF
8378 static void i40e_fdir_filter_exit(struct i40e_pf *pf) in i40e_fdir_filter_exit() argument
8385 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_exit()
8390 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { in i40e_fdir_filter_exit()
8394 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_fdir_filter_exit()
8396 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { in i40e_fdir_filter_exit()
8400 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_fdir_filter_exit()
8402 pf->fdir_pf_active_filters = 0; in i40e_fdir_filter_exit()
8403 pf->fd_tcp4_filter_cnt = 0; in i40e_fdir_filter_exit()
8404 pf->fd_udp4_filter_cnt = 0; in i40e_fdir_filter_exit()
8405 pf->fd_sctp4_filter_cnt = 0; in i40e_fdir_filter_exit()
8406 pf->fd_ip4_filter_cnt = 0; in i40e_fdir_filter_exit()
8409 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, in i40e_fdir_filter_exit()
8414 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, in i40e_fdir_filter_exit()
8419 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, in i40e_fdir_filter_exit()
8424 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, in i40e_fdir_filter_exit()
8427 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, in i40e_fdir_filter_exit()
8433 * @pf: Pointer to PF
8438 static void i40e_cloud_filter_exit(struct i40e_pf *pf) in i40e_cloud_filter_exit() argument
8444 &pf->cloud_filter_list, cloud_node) { in i40e_cloud_filter_exit()
8448 pf->num_cloud_filters = 0; in i40e_cloud_filter_exit()
8450 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && in i40e_cloud_filter_exit()
8451 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { in i40e_cloud_filter_exit()
8452 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_cloud_filter_exit()
8453 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; in i40e_cloud_filter_exit()
8454 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_cloud_filter_exit()
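
Both i40e_fdir_filter_exit() and i40e_cloud_filter_exit() tear their filter lists down with hlist_for_each_entry_safe(), which caches the next node before the current one is freed. A minimal userspace equivalent with a hand-rolled singly linked list (structure and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct filter {
    int id;
    struct filter *next;
};

/* Delete every node while walking the list: save the successor before
 * freeing the current node so iteration can continue safely. */
static void filter_list_exit(struct filter **head)
{
    struct filter *f = *head, *next;

    while (f) {
        next = f->next;          /* grab successor before freeing */
        printf("removing filter %d\n", f->id);
        free(f);
        f = next;
    }
    *head = NULL;
}

int main(void)
{
    struct filter *head = NULL;

    for (int i = 0; i < 3; i++) {
        struct filter *f = malloc(sizeof(*f));
        f->id = i;
        f->next = head;
        head = f;
    }
    filter_list_exit(&head);
    return 0;
}
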
8479 * i40e_do_reset - Start a PF or Core Reset sequence
8480 * @pf: board private structure
8485 * The essential difference in resets is that the PF Reset
8489 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) in i40e_do_reset() argument
8504 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); in i40e_do_reset()
8505 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
8507 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
8515 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); in i40e_do_reset()
8516 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_do_reset()
8518 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); in i40e_do_reset()
8519 i40e_flush(&pf->hw); in i40e_do_reset()
8523 /* Request a PF Reset in i40e_do_reset()
8525 * Resets only the PF-specific registers in i40e_do_reset()
8531 dev_dbg(&pf->pdev->dev, "PFR requested\n"); in i40e_do_reset()
8532 i40e_handle_reset_warning(pf, lock_acquired); in i40e_do_reset()
8534 dev_info(&pf->pdev->dev, in i40e_do_reset()
8535 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? in i40e_do_reset()
8543 dev_info(&pf->pdev->dev, in i40e_do_reset()
8545 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
8546 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
8551 i40e_vsi_reinit_locked(pf->vsi[v]); in i40e_do_reset()
8557 dev_info(&pf->pdev->dev, "VSI down requested\n"); in i40e_do_reset()
8558 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_do_reset()
8559 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_do_reset()
8569 dev_info(&pf->pdev->dev, in i40e_do_reset()
8577 * @pf: board private structure
8581 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, in i40e_dcb_need_reconfig() argument
8596 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); in i40e_dcb_need_reconfig()
8602 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); in i40e_dcb_need_reconfig()
8607 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); in i40e_dcb_need_reconfig()
8615 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); in i40e_dcb_need_reconfig()
8623 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); in i40e_dcb_need_reconfig()
8626 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); in i40e_dcb_need_reconfig()
8632 * @pf: board private structure
8635 static int i40e_handle_lldp_event(struct i40e_pf *pf, in i40e_handle_lldp_event() argument
8640 struct i40e_hw *hw = &pf->hw; in i40e_handle_lldp_event()
8647 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) in i40e_handle_lldp_event()
8653 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); in i40e_handle_lldp_event()
8659 dev_dbg(&pf->pdev->dev, in i40e_handle_lldp_event()
8675 ret = i40e_get_dcb_config(&pf->hw); in i40e_handle_lldp_event()
8677 dev_info(&pf->pdev->dev, in i40e_handle_lldp_event()
8679 i40e_stat_str(&pf->hw, ret), in i40e_handle_lldp_event()
8680 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_handle_lldp_event()
8687 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); in i40e_handle_lldp_event()
8691 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, in i40e_handle_lldp_event()
8694 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); in i40e_handle_lldp_event()
8701 pf->flags |= I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
8703 pf->flags &= ~I40E_FLAG_DCB_ENABLED; in i40e_handle_lldp_event()
8705 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
8707 i40e_pf_quiesce_all_vsi(pf); in i40e_handle_lldp_event()
8710 i40e_dcb_reconfigure(pf); in i40e_handle_lldp_event()
8712 ret = i40e_resume_port_tx(pf); in i40e_handle_lldp_event()
8714 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_handle_lldp_event()
8719 /* Wait for the PF's queues to be disabled */ in i40e_handle_lldp_event()
8720 ret = i40e_pf_wait_queues_disabled(pf); in i40e_handle_lldp_event()
8722 /* Schedule PF reset to recover */ in i40e_handle_lldp_event()
8723 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_handle_lldp_event()
8724 i40e_service_event_schedule(pf); in i40e_handle_lldp_event()
8726 i40e_pf_unquiesce_all_vsi(pf); in i40e_handle_lldp_event()
8727 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_handle_lldp_event()
8728 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_handle_lldp_event()
8738 * @pf: board private structure
8742 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) in i40e_do_reset_safe() argument
8745 i40e_do_reset(pf, reset_flags, true); in i40e_do_reset_safe()
8751 * @pf: board private structure
8754 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8757 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, in i40e_handle_lan_overflow_event() argument
8764 struct i40e_hw *hw = &pf->hw; in i40e_handle_lan_overflow_event()
8768 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", in i40e_handle_lan_overflow_event()
8777 vf = &pf->vf[vf_id]; in i40e_handle_lan_overflow_event()
8787 * @pf: board private structure
8789 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) in i40e_get_cur_guaranteed_fd_count() argument
8793 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_cur_guaranteed_fd_count()
8799 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8800 * @pf: board private structure
8802 u32 i40e_get_current_fd_count(struct i40e_pf *pf) in i40e_get_current_fd_count() argument
8806 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); in i40e_get_current_fd_count()
8815 * @pf: board private structure
8817 u32 i40e_get_global_fd_count(struct i40e_pf *pf) in i40e_get_global_fd_count() argument
8821 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); in i40e_get_global_fd_count()
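
The flow director count helpers read one status register and extract the guaranteed and best-effort filter counts with a mask and shift. A generic sketch of that extraction; the field layout below is invented for illustration, the real masks live in the register definitions:

#include <stdio.h>

/* Illustrative layout: low 13 bits = guaranteed filters,
 * bits 16..28 = best-effort filters. */
#define GUARANTEED_MASK   0x1FFFu
#define BEST_EFFORT_MASK  (0x1FFFu << 16)
#define BEST_EFFORT_SHIFT 16

/* Pull both filter counts out of one status word, the way the FD count
 * helpers mask and shift the hardware statistic register. */
static unsigned int total_fd_count(unsigned int fdstat)
{
    unsigned int guaranteed = fdstat & GUARANTEED_MASK;
    unsigned int best_effort = (fdstat & BEST_EFFORT_MASK) >> BEST_EFFORT_SHIFT;

    return guaranteed + best_effort;
}

int main(void)
{
    unsigned int fdstat = (7u << 16) | 25u;   /* 7 best-effort, 25 guaranteed */

    printf("programmed FD filters: %u\n", total_fd_count(fdstat));  /* 32 */
    return 0;
}
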
8830 * @pf: board private structure
8832 static void i40e_reenable_fdir_sb(struct i40e_pf *pf) in i40e_reenable_fdir_sb() argument
8834 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_reenable_fdir_sb()
8835 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_reenable_fdir_sb()
8836 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_sb()
8837 …dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now… in i40e_reenable_fdir_sb()
8842 * @pf: board private structure
8844 static void i40e_reenable_fdir_atr(struct i40e_pf *pf) in i40e_reenable_fdir_atr() argument
8846 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) { in i40e_reenable_fdir_atr()
8852 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, in i40e_reenable_fdir_atr()
8856 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_reenable_fdir_atr()
8857 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_reenable_fdir_atr()
8858 …dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no c… in i40e_reenable_fdir_atr()
8864 * @pf: board private structure
8867 static void i40e_delete_invalid_filter(struct i40e_pf *pf, in i40e_delete_invalid_filter() argument
8871 pf->fdir_pf_active_filters--; in i40e_delete_invalid_filter()
8872 pf->fd_inv = 0; in i40e_delete_invalid_filter()
8876 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
8879 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
8882 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
8887 pf->fd_tcp4_filter_cnt--; in i40e_delete_invalid_filter()
8890 pf->fd_udp4_filter_cnt--; in i40e_delete_invalid_filter()
8893 pf->fd_sctp4_filter_cnt--; in i40e_delete_invalid_filter()
8896 pf->fd_ip4_filter_cnt--; in i40e_delete_invalid_filter()
8909 * @pf: board private structure
8911 void i40e_fdir_check_and_reenable(struct i40e_pf *pf) in i40e_fdir_check_and_reenable() argument
8917 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_check_and_reenable()
8921 fcnt_prog = i40e_get_global_fd_count(pf); in i40e_fdir_check_and_reenable()
8922 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fdir_check_and_reenable()
8924 (pf->fd_add_err == 0) || in i40e_fdir_check_and_reenable()
8925 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) in i40e_fdir_check_and_reenable()
8926 i40e_reenable_fdir_sb(pf); in i40e_fdir_check_and_reenable()
8933 (pf->fd_tcp4_filter_cnt == 0)) in i40e_fdir_check_and_reenable()
8934 i40e_reenable_fdir_atr(pf); in i40e_fdir_check_and_reenable()
8937 if (pf->fd_inv > 0) { in i40e_fdir_check_and_reenable()
8939 &pf->fdir_filter_list, fdir_node) in i40e_fdir_check_and_reenable()
8940 if (filter->fd_id == pf->fd_inv) in i40e_fdir_check_and_reenable()
8941 i40e_delete_invalid_filter(pf, filter); in i40e_fdir_check_and_reenable()
8949 * @pf: board private structure
8951 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) in i40e_fdir_flush_and_replay() argument
8959 if (!time_after(jiffies, pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
8966 min_flush_time = pf->fd_flush_timestamp + in i40e_fdir_flush_and_replay()
8968 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; in i40e_fdir_flush_and_replay()
8972 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
8973 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); in i40e_fdir_flush_and_replay()
8977 pf->fd_flush_timestamp = jiffies; in i40e_fdir_flush_and_replay()
8978 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
8980 wr32(&pf->hw, I40E_PFQF_CTL_1, in i40e_fdir_flush_and_replay()
8982 i40e_flush(&pf->hw); in i40e_fdir_flush_and_replay()
8983 pf->fd_flush_cnt++; in i40e_fdir_flush_and_replay()
8984 pf->fd_add_err = 0; in i40e_fdir_flush_and_replay()
8988 reg = rd32(&pf->hw, I40E_PFQF_CTL_1); in i40e_fdir_flush_and_replay()
8993 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); in i40e_fdir_flush_and_replay()
8996 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); in i40e_fdir_flush_and_replay()
8997 if (!disable_atr && !pf->fd_tcp4_filter_cnt) in i40e_fdir_flush_and_replay()
8998 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fdir_flush_and_replay()
8999 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fdir_flush_and_replay()
9000 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fdir_flush_and_replay()
9001 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); in i40e_fdir_flush_and_replay()
9007 * @pf: board private structure
9009 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) in i40e_get_current_atr_cnt() argument
9011 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; in i40e_get_current_atr_cnt()
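The ATR count above is plain arithmetic: everything currently programmed in the flow director minus the sideband filters this PF added explicitly. As a trivial standalone sketch:

    #include <stdio.h>

    /* ATR entries = total programmed entries - explicit sideband entries. */
    static unsigned int current_atr_cnt(unsigned int fd_total, unsigned int sb_active)
    {
        return fd_total - sb_active;   /* assumes fd_total >= sb_active */
    }

    int main(void)
    {
        printf("ATR filters in use: %u\n", current_atr_cnt(120, 30));
        return 0;
    }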
9016 * @pf: board private structure
9018 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) in i40e_fdir_reinit_subtask() argument
9022 if (test_bit(__I40E_DOWN, pf->state)) in i40e_fdir_reinit_subtask()
9025 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fdir_reinit_subtask()
9026 i40e_fdir_flush_and_replay(pf); in i40e_fdir_reinit_subtask()
9028 i40e_fdir_check_and_reenable(pf); in i40e_fdir_reinit_subtask()
9074 struct i40e_pf *pf; in i40e_veb_link_event() local
9077 if (!veb || !veb->pf) in i40e_veb_link_event()
9079 pf = veb->pf; in i40e_veb_link_event()
9083 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9084 i40e_veb_link_event(pf->veb[i], link_up); in i40e_veb_link_event()
9087 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_veb_link_event()
9088 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) in i40e_veb_link_event()
9089 i40e_vsi_link_event(pf->vsi[i], link_up); in i40e_veb_link_event()
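The VEB link handler above fans the event out depth-first: first into any child bridges, then to the VSIs attached to this bridge. A standalone model of that traversal over a small array-backed tree (types and fields are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NODES 8

    struct node {
        int  seid;          /* this element's switch id, 0 = unused slot */
        int  uplink_seid;   /* parent's switch id */
        bool is_veb;        /* bridge (recurse) vs. VSI (leaf) */
    };

    static struct node nodes[MAX_NODES] = {
        { 1, 0, true  },    /* root VEB */
        { 2, 1, true  },    /* child VEB under the root */
        { 3, 1, false },    /* VSI on the root VEB */
        { 4, 2, false },    /* VSI on the child VEB */
    };

    static void veb_link_event(int veb_seid, bool link_up)
    {
        /* first recurse into any bridges hanging off this one ... */
        for (int i = 0; i < MAX_NODES; i++)
            if (nodes[i].seid && nodes[i].is_veb &&
                nodes[i].uplink_seid == veb_seid)
                veb_link_event(nodes[i].seid, link_up);

        /* ... then notify the VSIs attached directly to it */
        for (int i = 0; i < MAX_NODES; i++)
            if (nodes[i].seid && !nodes[i].is_veb &&
                nodes[i].uplink_seid == veb_seid)
                printf("VSI %d: link %s\n", nodes[i].seid,
                       link_up ? "up" : "down");
    }

    int main(void) { veb_link_event(1, true); return 0; }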
9094 * @pf: board private structure
9096 static void i40e_link_event(struct i40e_pf *pf) in i40e_link_event() argument
9098 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_link_event()
9104 pf->hw.phy.get_link_info = true; in i40e_link_event()
9105 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); in i40e_link_event()
9106 status = i40e_get_link_status(&pf->hw, &new_link); in i40e_link_event()
9110 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9115 set_bit(__I40E_TEMP_LINK_POLLING, pf->state); in i40e_link_event()
9116 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", in i40e_link_event()
9121 old_link_speed = pf->hw.phy.link_info_old.link_speed; in i40e_link_event()
9122 new_link_speed = pf->hw.phy.link_info.link_speed; in i40e_link_event()
9135 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_link_event()
9136 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); in i40e_link_event()
9140 if (pf->vf) in i40e_link_event()
9141 i40e_vc_notify_link_state(pf); in i40e_link_event()
9143 if (pf->flags & I40E_FLAG_PTP) in i40e_link_event()
9144 i40e_ptp_set_increment(pf); in i40e_link_event()
9149 * @pf: board private structure
9151 static void i40e_watchdog_subtask(struct i40e_pf *pf) in i40e_watchdog_subtask() argument
9156 if (test_bit(__I40E_DOWN, pf->state) || in i40e_watchdog_subtask()
9157 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_watchdog_subtask()
9161 if (time_before(jiffies, (pf->service_timer_previous + in i40e_watchdog_subtask()
9162 pf->service_timer_period))) in i40e_watchdog_subtask()
9164 pf->service_timer_previous = jiffies; in i40e_watchdog_subtask()
9166 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || in i40e_watchdog_subtask()
9167 test_bit(__I40E_TEMP_LINK_POLLING, pf->state)) in i40e_watchdog_subtask()
9168 i40e_link_event(pf); in i40e_watchdog_subtask()
9173 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_watchdog_subtask()
9174 if (pf->vsi[i] && pf->vsi[i]->netdev) in i40e_watchdog_subtask()
9175 i40e_update_stats(pf->vsi[i]); in i40e_watchdog_subtask()
9177 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { in i40e_watchdog_subtask()
9180 if (pf->veb[i]) in i40e_watchdog_subtask()
9181 i40e_update_veb_stats(pf->veb[i]); in i40e_watchdog_subtask()
9184 i40e_ptp_rx_hang(pf); in i40e_watchdog_subtask()
9185 i40e_ptp_tx_hang(pf); in i40e_watchdog_subtask()
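The watchdog above rate-limits itself with the classic time_before(jiffies, previous + period) guard before touching stats and link state. A userspace analogue of the same guard using a monotonic clock (names are ours, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static double now_seconds(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    /* Run the body at most once per `period` seconds, mirroring the driver's
     * time_before(jiffies, service_timer_previous + service_timer_period) check. */
    static bool watchdog_tick(double *previous, double period)
    {
        double now = now_seconds();

        if (now < *previous + period)
            return false;           /* too soon, skip this pass */
        *previous = now;
        /* ... update stats, poll link, etc. ... */
        return true;
    }

    int main(void)
    {
        double prev = 0.0;

        for (int i = 0; i < 3; i++)
            printf("pass %d ran: %s\n", i,
                   watchdog_tick(&prev, 1.0) ? "yes" : "no");
        return 0;
    }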
9190 * @pf: board private structure
9192 static void i40e_reset_subtask(struct i40e_pf *pf) in i40e_reset_subtask() argument
9196 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { in i40e_reset_subtask()
9198 clear_bit(__I40E_REINIT_REQUESTED, pf->state); in i40e_reset_subtask()
9200 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
9202 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
9204 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
9206 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
9208 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { in i40e_reset_subtask()
9210 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); in i40e_reset_subtask()
9212 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { in i40e_reset_subtask()
9214 clear_bit(__I40E_DOWN_REQUESTED, pf->state); in i40e_reset_subtask()
9220 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { in i40e_reset_subtask()
9221 i40e_prep_for_reset(pf, false); in i40e_reset_subtask()
9222 i40e_reset(pf); in i40e_reset_subtask()
9223 i40e_rebuild(pf, false, false); in i40e_reset_subtask()
9228 !test_bit(__I40E_DOWN, pf->state) && in i40e_reset_subtask()
9229 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_reset_subtask()
9230 i40e_do_reset(pf, reset_flags, false); in i40e_reset_subtask()
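The reset subtask above translates individually requested reset bits into one flags word, clearing each request as it is consumed, and then performs a single reset pass. A standalone sketch of that test-and-clear translation (plain flags here stand in for the kernel's atomic bit operations):

    #include <stdbool.h>
    #include <stdio.h>

    enum { REQ_PF = 1u << 0, REQ_CORE = 1u << 1, REQ_GLOBAL = 1u << 2 };

    /* Pending request bits, normally set from interrupt and timeout paths. */
    static unsigned int pending = REQ_PF | REQ_GLOBAL;

    static bool test_and_clear(unsigned int bit)
    {
        bool was_set = (pending & bit) != 0;

        pending &= ~bit;
        return was_set;
    }

    int main(void)
    {
        unsigned int reset_flags = 0;

        if (test_and_clear(REQ_PF))     reset_flags |= REQ_PF;
        if (test_and_clear(REQ_CORE))   reset_flags |= REQ_CORE;
        if (test_and_clear(REQ_GLOBAL)) reset_flags |= REQ_GLOBAL;

        if (reset_flags)
            printf("performing one reset pass for flags 0x%x\n", reset_flags);
        return 0;
    }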
9236 * @pf: board private structure
9239 static void i40e_handle_link_event(struct i40e_pf *pf, in i40e_handle_link_event() argument
9251 i40e_link_event(pf); in i40e_handle_link_event()
9255 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9257 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9266 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { in i40e_handle_link_event()
9267 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9269 dev_err(&pf->pdev->dev, in i40e_handle_link_event()
9277 * @pf: board private structure
9279 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) in i40e_clean_adminq_subtask() argument
9282 struct i40e_hw *hw = &pf->hw; in i40e_clean_adminq_subtask()
9289 /* Do not run clean AQ when PF reset fails */ in i40e_clean_adminq_subtask()
9290 if (test_bit(__I40E_RESET_FAILED, pf->state)) in i40e_clean_adminq_subtask()
9294 val = rd32(&pf->hw, pf->hw.aq.arq.len); in i40e_clean_adminq_subtask()
9298 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); in i40e_clean_adminq_subtask()
9303 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
9305 pf->arq_overflows++; in i40e_clean_adminq_subtask()
9309 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
9313 wr32(&pf->hw, pf->hw.aq.arq.len, val); in i40e_clean_adminq_subtask()
9315 val = rd32(&pf->hw, pf->hw.aq.asq.len); in i40e_clean_adminq_subtask()
9318 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9319 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); in i40e_clean_adminq_subtask()
9323 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9324 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); in i40e_clean_adminq_subtask()
9328 if (pf->hw.debug_mask & I40E_DEBUG_AQ) in i40e_clean_adminq_subtask()
9329 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); in i40e_clean_adminq_subtask()
9333 wr32(&pf->hw, pf->hw.aq.asq.len, val); in i40e_clean_adminq_subtask()
9345 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); in i40e_clean_adminq_subtask()
9353 i40e_handle_link_event(pf, &event); in i40e_clean_adminq_subtask()
9356 ret = i40e_vc_process_vf_msg(pf, in i40e_clean_adminq_subtask()
9364 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); in i40e_clean_adminq_subtask()
9367 ret = i40e_handle_lldp_event(pf, &event); in i40e_clean_adminq_subtask()
9372 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); in i40e_clean_adminq_subtask()
9373 i40e_handle_lan_overflow_event(pf, &event); in i40e_clean_adminq_subtask()
9376 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); in i40e_clean_adminq_subtask()
9381 i40e_debug(&pf->hw, I40E_DEBUG_NVM, in i40e_clean_adminq_subtask()
9386 dev_info(&pf->pdev->dev, in i40e_clean_adminq_subtask()
9391 } while (i++ < pf->adminq_work_limit); in i40e_clean_adminq_subtask()
9393 if (i < pf->adminq_work_limit) in i40e_clean_adminq_subtask()
9394 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_clean_adminq_subtask()
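The admin-queue clean-up above drains at most adminq_work_limit events per pass, dispatches each by opcode, and only clears the "event pending" flag when the queue was actually emptied. A standalone model of that bounded-drain pattern:

    #include <stdbool.h>
    #include <stdio.h>

    /* Pretend event source: returns false once the queue is empty. */
    static int queued = 25;

    static bool dequeue_event(int *opcode)
    {
        if (queued == 0)
            return false;
        queued--;
        *opcode = 0x0701;          /* e.g. a "link status change" event */
        return true;
    }

    /* Drain up to `work_limit` events; report whether the queue is now empty
     * so the caller knows whether to leave "pending" set and reschedule. */
    static bool clean_adminq(int work_limit)
    {
        int opcode, done = 0;

        while (done < work_limit && dequeue_event(&opcode)) {
            /* dispatch on opcode: link event, VF message, LLDP MIB change, ... */
            done++;
        }
        return queued == 0;
    }

    int main(void)
    {
        while (!clean_adminq(10))
            puts("work limit hit, leaving 'pending' set and rescheduling");
        puts("admin queue drained");
        return 0;
    }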
9407 * @pf: board private structure
9409 static void i40e_verify_eeprom(struct i40e_pf *pf) in i40e_verify_eeprom() argument
9413 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
9416 err = i40e_diag_eeprom_test(&pf->hw); in i40e_verify_eeprom()
9418 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", in i40e_verify_eeprom()
9420 set_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
9424 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_verify_eeprom()
9425 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); in i40e_verify_eeprom()
9426 clear_bit(__I40E_BAD_EEPROM, pf->state); in i40e_verify_eeprom()
9432 * @pf: pointer to the PF structure
9436 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) in i40e_enable_pf_switch_lb() argument
9438 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_enable_pf_switch_lb()
9442 ctxt.seid = pf->main_vsi_seid; in i40e_enable_pf_switch_lb()
9443 ctxt.pf_num = pf->hw.pf_id; in i40e_enable_pf_switch_lb()
9445 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_enable_pf_switch_lb()
9447 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
9448 "couldn't get PF vsi config, err %s aq_err %s\n", in i40e_enable_pf_switch_lb()
9449 i40e_stat_str(&pf->hw, ret), in i40e_enable_pf_switch_lb()
9450 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
9459 dev_info(&pf->pdev->dev, in i40e_enable_pf_switch_lb()
9461 i40e_stat_str(&pf->hw, ret), in i40e_enable_pf_switch_lb()
9462 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_enable_pf_switch_lb()
9468 * @pf: pointer to the PF structure
9472 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) in i40e_disable_pf_switch_lb() argument
9474 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_disable_pf_switch_lb()
9478 ctxt.seid = pf->main_vsi_seid; in i40e_disable_pf_switch_lb()
9479 ctxt.pf_num = pf->hw.pf_id; in i40e_disable_pf_switch_lb()
9481 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_disable_pf_switch_lb()
9483 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
9484 "couldn't get PF vsi config, err %s aq_err %s\n", in i40e_disable_pf_switch_lb()
9485 i40e_stat_str(&pf->hw, ret), in i40e_disable_pf_switch_lb()
9486 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
9495 dev_info(&pf->pdev->dev, in i40e_disable_pf_switch_lb()
9497 i40e_stat_str(&pf->hw, ret), in i40e_disable_pf_switch_lb()
9498 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_disable_pf_switch_lb()
9512 struct i40e_pf *pf = veb->pf; in i40e_config_bridge_mode() local
9514 if (pf->hw.debug_mask & I40E_DEBUG_LAN) in i40e_config_bridge_mode()
9515 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", in i40e_config_bridge_mode()
9518 i40e_disable_pf_switch_lb(pf); in i40e_config_bridge_mode()
9520 i40e_enable_pf_switch_lb(pf); in i40e_config_bridge_mode()
9535 struct i40e_pf *pf = veb->pf; in i40e_reconstitute_veb() local
9540 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { in i40e_reconstitute_veb()
9541 if (pf->vsi[v] && in i40e_reconstitute_veb()
9542 pf->vsi[v]->veb_idx == veb->idx && in i40e_reconstitute_veb()
9543 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_reconstitute_veb()
9544 ctl_vsi = pf->vsi[v]; in i40e_reconstitute_veb()
9549 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
9554 if (ctl_vsi != pf->vsi[pf->lan_vsi]) in i40e_reconstitute_veb()
9555 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_reconstitute_veb()
9558 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
9570 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_reconstitute_veb()
9577 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_reconstitute_veb()
9578 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) in i40e_reconstitute_veb()
9581 if (pf->vsi[v]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
9582 struct i40e_vsi *vsi = pf->vsi[v]; in i40e_reconstitute_veb()
9587 dev_info(&pf->pdev->dev, in i40e_reconstitute_veb()
9598 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { in i40e_reconstitute_veb()
9599 pf->veb[veb_idx]->uplink_seid = veb->seid; in i40e_reconstitute_veb()
9600 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); in i40e_reconstitute_veb()
9612 * @pf: the PF struct
9615 static int i40e_get_capabilities(struct i40e_pf *pf, in i40e_get_capabilities() argument
9630 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, in i40e_get_capabilities()
9636 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { in i40e_get_capabilities()
9639 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { in i40e_get_capabilities()
9640 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
9642 i40e_stat_str(&pf->hw, err), in i40e_get_capabilities()
9643 i40e_aq_str(&pf->hw, in i40e_get_capabilities()
9644 pf->hw.aq.asq_last_status)); in i40e_get_capabilities()
9649 if (pf->hw.debug_mask & I40E_DEBUG_USER) { in i40e_get_capabilities()
9651 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
9652 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", in i40e_get_capabilities()
9653 pf->hw.pf_id, pf->hw.func_caps.num_vfs, in i40e_get_capabilities()
9654 pf->hw.func_caps.num_msix_vectors, in i40e_get_capabilities()
9655 pf->hw.func_caps.num_msix_vectors_vf, in i40e_get_capabilities()
9656 pf->hw.func_caps.fd_filters_guaranteed, in i40e_get_capabilities()
9657 pf->hw.func_caps.fd_filters_best_effort, in i40e_get_capabilities()
9658 pf->hw.func_caps.num_tx_qp, in i40e_get_capabilities()
9659 pf->hw.func_caps.num_vsis); in i40e_get_capabilities()
9661 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
9663 pf->hw.dev_caps.switch_mode, in i40e_get_capabilities()
9664 pf->hw.dev_caps.valid_functions); in i40e_get_capabilities()
9665 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
9667 pf->hw.dev_caps.sr_iov_1_1, in i40e_get_capabilities()
9668 pf->hw.dev_caps.num_vfs); in i40e_get_capabilities()
9669 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
9671 pf->hw.dev_caps.num_vsis, in i40e_get_capabilities()
9672 pf->hw.dev_caps.num_rx_qp, in i40e_get_capabilities()
9673 pf->hw.dev_caps.num_tx_qp); in i40e_get_capabilities()
9677 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ in i40e_get_capabilities()
9678 + pf->hw.func_caps.num_vfs) in i40e_get_capabilities()
9679 if (pf->hw.revision_id == 0 && in i40e_get_capabilities()
9680 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { in i40e_get_capabilities()
9681 dev_info(&pf->pdev->dev, in i40e_get_capabilities()
9683 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); in i40e_get_capabilities()
9684 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; in i40e_get_capabilities()
9694 * @pf: board private structure
9696 static void i40e_fdir_sb_setup(struct i40e_pf *pf) in i40e_fdir_sb_setup() argument
9703 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { in i40e_fdir_sb_setup()
9712 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); in i40e_fdir_sb_setup()
9715 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_fdir_sb_setup()
9719 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_sb_setup()
9723 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, in i40e_fdir_sb_setup()
9724 pf->vsi[pf->lan_vsi]->seid, 0); in i40e_fdir_sb_setup()
9726 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); in i40e_fdir_sb_setup()
9727 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_fdir_sb_setup()
9728 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_fdir_sb_setup()
9738 * @pf: board private structure
9740 static void i40e_fdir_teardown(struct i40e_pf *pf) in i40e_fdir_teardown() argument
9744 i40e_fdir_filter_exit(pf); in i40e_fdir_teardown()
9745 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); in i40e_fdir_teardown()
9752 * @vsi: PF main vsi
9761 struct i40e_pf *pf = vsi->back; in i40e_rebuild_cloud_filters() local
9766 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, in i40e_rebuild_cloud_filters()
9778 dev_dbg(&pf->pdev->dev, in i40e_rebuild_cloud_filters()
9780 i40e_stat_str(&pf->hw, ret), in i40e_rebuild_cloud_filters()
9781 i40e_aq_str(&pf->hw, in i40e_rebuild_cloud_filters()
9782 pf->hw.aq.asq_last_status)); in i40e_rebuild_cloud_filters()
9791 * @vsi: PF main vsi
9852 * @pf: board private structure
9856 * Close up the VFs and other things in prep for PF Reset.
9858 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) in i40e_prep_for_reset() argument
9860 struct i40e_hw *hw = &pf->hw; in i40e_prep_for_reset()
9864 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_prep_for_reset()
9865 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_prep_for_reset()
9867 if (i40e_check_asq_alive(&pf->hw)) in i40e_prep_for_reset()
9868 i40e_vc_notify_reset(pf); in i40e_prep_for_reset()
9870 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); in i40e_prep_for_reset()
9876 i40e_pf_quiesce_all_vsi(pf); in i40e_prep_for_reset()
9880 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_prep_for_reset()
9881 if (pf->vsi[v]) in i40e_prep_for_reset()
9882 pf->vsi[v]->seid = 0; in i40e_prep_for_reset()
9885 i40e_shutdown_adminq(&pf->hw); in i40e_prep_for_reset()
9891 dev_warn(&pf->pdev->dev, in i40e_prep_for_reset()
9898 i40e_ptp_save_hw_time(pf); in i40e_prep_for_reset()
9903 * @pf: PF struct
9905 static void i40e_send_version(struct i40e_pf *pf) in i40e_send_version() argument
9914 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); in i40e_send_version()
9963 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
9964 * @pf: board private structure
9966 static int i40e_reset(struct i40e_pf *pf) in i40e_reset() argument
9968 struct i40e_hw *hw = &pf->hw; in i40e_reset()
9973 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); in i40e_reset()
9974 set_bit(__I40E_RESET_FAILED, pf->state); in i40e_reset()
9975 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_reset()
9977 pf->pfr_count++; in i40e_reset()
9984 * @pf: board private structure
9989 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) in i40e_rebuild() argument
9991 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_rebuild()
9992 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_rebuild()
9993 struct i40e_hw *hw = &pf->hw; in i40e_rebuild()
9999 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && in i40e_rebuild()
10000 i40e_check_recovery_mode(pf)) { in i40e_rebuild()
10001 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); in i40e_rebuild()
10004 if (test_bit(__I40E_DOWN, pf->state) && in i40e_rebuild()
10005 !test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_rebuild()
10008 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); in i40e_rebuild()
10011 ret = i40e_init_adminq(&pf->hw); in i40e_rebuild()
10013 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", in i40e_rebuild()
10014 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10015 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10018 i40e_get_oem_version(&pf->hw); in i40e_rebuild()
10020 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && in i40e_rebuild()
10032 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) in i40e_rebuild()
10033 i40e_verify_eeprom(pf); in i40e_rebuild()
10039 if (test_bit(__I40E_RECOVERY_MODE, pf->state) || in i40e_rebuild()
10041 if (i40e_get_capabilities(pf, in i40e_rebuild()
10045 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_rebuild()
10049 if (i40e_setup_misc_vector_for_recovery_mode(pf)) in i40e_rebuild()
10058 free_irq(pf->pdev->irq, pf); in i40e_rebuild()
10059 i40e_clear_interrupt_scheme(pf); in i40e_rebuild()
10060 if (i40e_restore_interrupt_scheme(pf)) in i40e_rebuild()
10065 i40e_send_version(pf); in i40e_rebuild()
10074 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); in i40e_rebuild()
10081 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10086 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); in i40e_rebuild()
10094 ret = i40e_init_pf_dcb(pf); in i40e_rebuild()
10096 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); in i40e_rebuild()
10097 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; in i40e_rebuild()
10104 ret = i40e_setup_pf_switch(pf, reinit); in i40e_rebuild()
10111 ret = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_rebuild()
10116 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", in i40e_rebuild()
10117 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10118 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10121 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); in i40e_rebuild()
10123 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", in i40e_rebuild()
10124 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10125 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10132 * try to recover minimal use by getting the basic PF VSI working. in i40e_rebuild()
10134 if (vsi->uplink_seid != pf->mac_seid) { in i40e_rebuild()
10135 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); in i40e_rebuild()
10138 if (!pf->veb[v]) in i40e_rebuild()
10141 if (pf->veb[v]->uplink_seid == pf->mac_seid || in i40e_rebuild()
10142 pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10143 ret = i40e_reconstitute_veb(pf->veb[v]); in i40e_rebuild()
10150 * for minimal rebuild of PF VSI. in i40e_rebuild()
10154 if (pf->veb[v]->uplink_seid == pf->mac_seid) { in i40e_rebuild()
10155 dev_info(&pf->pdev->dev, in i40e_rebuild()
10156 "rebuild of switch failed: %d, will try to set up simple PF connection\n", in i40e_rebuild()
10158 vsi->uplink_seid = pf->mac_seid; in i40e_rebuild()
10160 } else if (pf->veb[v]->uplink_seid == 0) { in i40e_rebuild()
10161 dev_info(&pf->pdev->dev, in i40e_rebuild()
10169 if (vsi->uplink_seid == pf->mac_seid) { in i40e_rebuild()
10170 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); in i40e_rebuild()
10174 dev_info(&pf->pdev->dev, in i40e_rebuild()
10202 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs in i40e_rebuild()
10223 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_rebuild()
10225 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_rebuild()
10227 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", in i40e_rebuild()
10228 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10229 i40e_aq_str(&pf->hw, in i40e_rebuild()
10230 pf->hw.aq.asq_last_status)); in i40e_rebuild()
10233 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_rebuild()
10234 ret = i40e_setup_misc_vector(pf); in i40e_rebuild()
10239 * PF/VF VSIs. in i40e_rebuild()
10242 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_rebuild()
10243 pf->main_vsi_seid); in i40e_rebuild()
10246 i40e_pf_unquiesce_all_vsi(pf); in i40e_rebuild()
10253 ret = i40e_set_promiscuous(pf, pf->cur_promisc); in i40e_rebuild()
10255 dev_warn(&pf->pdev->dev, in i40e_rebuild()
10257 pf->cur_promisc ? "on" : "off", in i40e_rebuild()
10258 i40e_stat_str(&pf->hw, ret), in i40e_rebuild()
10259 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_rebuild()
10261 i40e_reset_all_vfs(pf, true); in i40e_rebuild()
10264 i40e_send_version(pf); in i40e_rebuild()
10273 clear_bit(__I40E_RESET_FAILED, pf->state); in i40e_rebuild()
10275 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); in i40e_rebuild()
10276 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); in i40e_rebuild()
10281 * @pf: board private structure
10286 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, in i40e_reset_and_rebuild() argument
10294 ret = i40e_reset(pf); in i40e_reset_and_rebuild()
10296 i40e_rebuild(pf, reinit, lock_acquired); in i40e_reset_and_rebuild()
10300 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10301 * @pf: board private structure
10308 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) in i40e_handle_reset_warning() argument
10310 i40e_prep_for_reset(pf, lock_acquired); in i40e_handle_reset_warning()
10311 i40e_reset_and_rebuild(pf, false, lock_acquired); in i40e_handle_reset_warning()
10316 * @pf: pointer to the PF structure
10320 static void i40e_handle_mdd_event(struct i40e_pf *pf) in i40e_handle_mdd_event() argument
10322 struct i40e_hw *hw = &pf->hw; in i40e_handle_mdd_event()
10328 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) in i40e_handle_mdd_event()
10342 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
10343 if (netif_msg_tx_err(pf)) in i40e_handle_mdd_event()
10344 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x … in i40e_handle_mdd_event()
10357 pf->hw.func_caps.base_queue; in i40e_handle_mdd_event()
10358 if (netif_msg_rx_err(pf)) in i40e_handle_mdd_event()
10359 …dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02… in i40e_handle_mdd_event()
10369 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); in i40e_handle_mdd_event()
10374 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n"); in i40e_handle_mdd_event()
10379 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { in i40e_handle_mdd_event()
10380 vf = &(pf->vf[i]); in i40e_handle_mdd_event()
10385 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
10387 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
10388 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
10396 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", in i40e_handle_mdd_event()
10398 dev_info(&pf->pdev->dev, in i40e_handle_mdd_event()
10399 "Use PF Control I/F to re-enable the VF\n"); in i40e_handle_mdd_event()
10405 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_handle_mdd_event()
10418 struct i40e_pf *pf = container_of(work, in i40e_service_task() local
10424 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_service_task()
10425 test_bit(__I40E_SUSPENDED, pf->state)) in i40e_service_task()
10428 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) in i40e_service_task()
10431 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_service_task()
10432 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); in i40e_service_task()
10433 i40e_sync_filters_subtask(pf); in i40e_service_task()
10434 i40e_reset_subtask(pf); in i40e_service_task()
10435 i40e_handle_mdd_event(pf); in i40e_service_task()
10436 i40e_vc_process_vflr_event(pf); in i40e_service_task()
10437 i40e_watchdog_subtask(pf); in i40e_service_task()
10438 i40e_fdir_reinit_subtask(pf); in i40e_service_task()
10439 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { in i40e_service_task()
10441 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], in i40e_service_task()
10444 i40e_client_subtask(pf); in i40e_service_task()
10446 pf->state)) in i40e_service_task()
10448 pf->vsi[pf->lan_vsi]); in i40e_service_task()
10450 i40e_sync_filters_subtask(pf); in i40e_service_task()
10452 i40e_reset_subtask(pf); in i40e_service_task()
10455 i40e_clean_adminq_subtask(pf); in i40e_service_task()
10459 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_service_task()
10465 if (time_after(jiffies, (start_time + pf->service_timer_period)) || in i40e_service_task()
10466 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || in i40e_service_task()
10467 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || in i40e_service_task()
10468 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) in i40e_service_task()
10469 i40e_service_event_schedule(pf); in i40e_service_task()
10478 struct i40e_pf *pf = from_timer(pf, t, service_timer); in i40e_service_timer() local
10480 mod_timer(&pf->service_timer, in i40e_service_timer()
10481 round_jiffies(jiffies + pf->service_timer_period)); in i40e_service_timer()
10482 i40e_service_event_schedule(pf); in i40e_service_timer()
10491 struct i40e_pf *pf = vsi->back; in i40e_set_num_rings_in_vsi() local
10495 vsi->alloc_queue_pairs = pf->num_lan_qps; in i40e_set_num_rings_in_vsi()
10502 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_set_num_rings_in_vsi()
10503 vsi->num_q_vectors = pf->num_lan_msix; in i40e_set_num_rings_in_vsi()
10515 vsi->num_q_vectors = pf->num_fdsb_msix; in i40e_set_num_rings_in_vsi()
10519 vsi->alloc_queue_pairs = pf->num_vmdq_qps; in i40e_set_num_rings_in_vsi()
10526 vsi->num_q_vectors = pf->num_vmdq_msix; in i40e_set_num_rings_in_vsi()
10530 vsi->alloc_queue_pairs = pf->num_vf_qps; in i40e_set_num_rings_in_vsi()
10591 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10592 * @pf: board private structure
10596 * On success: returns vsi index in PF (positive)
10598 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) in i40e_vsi_mem_alloc() argument
10605 /* Need to protect the allocation of the VSIs at the PF level */ in i40e_vsi_mem_alloc()
10606 mutex_lock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
10614 i = pf->next_vsi; in i40e_vsi_mem_alloc()
10615 while (i < pf->num_alloc_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
10617 if (i >= pf->num_alloc_vsi) { in i40e_vsi_mem_alloc()
10619 while (i < pf->next_vsi && pf->vsi[i]) in i40e_vsi_mem_alloc()
10623 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { in i40e_vsi_mem_alloc()
10629 pf->next_vsi = ++i; in i40e_vsi_mem_alloc()
10637 vsi->back = pf; in i40e_vsi_mem_alloc()
10643 pf->rss_table_size : 64; in i40e_vsi_mem_alloc()
10650 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); in i40e_vsi_mem_alloc()
10668 pf->vsi[vsi_idx] = vsi; in i40e_vsi_mem_alloc()
10674 pf->next_vsi = i - 1; in i40e_vsi_mem_alloc()
10677 mutex_unlock(&pf->switch_mutex); in i40e_vsi_mem_alloc()
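The VSI allocation above looks for a free pf->vsi[] slot starting at the next_vsi hint and wraps around once before giving up, then records where the next search should start. A standalone model of that hinted, wrapping search (array and bookkeeping are illustrative):

    #include <stdio.h>

    #define NUM_SLOTS 8

    /* 0 = free, nonzero = in use (stands in for a NULL check on pf->vsi[i]). */
    static int slots[NUM_SLOTS] = { 1, 1, 0, 1, 0, 0, 0, 0 };
    static int next_hint = 3;      /* where the last allocation left off */

    /* Search [hint, end), then [0, hint); return a free index or -1. */
    static int alloc_slot(void)
    {
        int i = next_hint;

        while (i < NUM_SLOTS && slots[i])
            i++;
        if (i >= NUM_SLOTS) {
            i = 0;
            while (i < next_hint && slots[i])
                i++;
        }
        if (i < NUM_SLOTS && !slots[i]) {
            slots[i] = 1;          /* claim it */
            next_hint = i + 1;     /* next search starts after this slot */
            return i;
        }
        return -1;                 /* table full */
    }

    int main(void)
    {
        printf("allocated slot %d\n", alloc_slot());
        printf("allocated slot %d\n", alloc_slot());
        return 0;
    }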
10725 struct i40e_pf *pf; in i40e_vsi_clear() local
10732 pf = vsi->back; in i40e_vsi_clear()
10734 mutex_lock(&pf->switch_mutex); in i40e_vsi_clear()
10735 if (!pf->vsi[vsi->idx]) { in i40e_vsi_clear()
10736 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n", in i40e_vsi_clear()
10741 if (pf->vsi[vsi->idx] != vsi) { in i40e_vsi_clear()
10742 dev_err(&pf->pdev->dev, in i40e_vsi_clear()
10743 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n", in i40e_vsi_clear()
10744 pf->vsi[vsi->idx]->idx, in i40e_vsi_clear()
10745 pf->vsi[vsi->idx]->type, in i40e_vsi_clear()
10750 /* updates the PF for this cleared vsi */ in i40e_vsi_clear()
10751 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_clear()
10752 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); in i40e_vsi_clear()
10758 pf->vsi[vsi->idx] = NULL; in i40e_vsi_clear()
10759 if (vsi->idx < pf->next_vsi) in i40e_vsi_clear()
10760 pf->next_vsi = vsi->idx; in i40e_vsi_clear()
10763 mutex_unlock(&pf->switch_mutex); in i40e_vsi_clear()
10796 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings() local
10811 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
10817 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
10828 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
10835 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
10844 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
10848 ring->itr_setting = pf->rx_itr_default; in i40e_alloc_rings()
10861 * @pf: board private structure
10866 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) in i40e_reserve_msix_vectors() argument
10868 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, in i40e_reserve_msix_vectors()
10871 dev_info(&pf->pdev->dev, in i40e_reserve_msix_vectors()
10881 * @pf: board private structure
10887 static int i40e_init_msix(struct i40e_pf *pf) in i40e_init_msix() argument
10889 struct i40e_hw *hw = &pf->hw; in i40e_init_msix()
10896 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_init_msix()
10923 /* reserve some vectors for the main PF traffic queues. Initially we in i40e_init_msix()
10931 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); in i40e_init_msix()
10932 vectors_left -= pf->num_lan_msix; in i40e_init_msix()
10935 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
10937 pf->num_fdsb_msix = 1; in i40e_init_msix()
10941 pf->num_fdsb_msix = 0; in i40e_init_msix()
10946 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
10947 iwarp_requested = pf->num_iwarp_msix; in i40e_init_msix()
10950 pf->num_iwarp_msix = 0; in i40e_init_msix()
10951 else if (vectors_left < pf->num_iwarp_msix) in i40e_init_msix()
10952 pf->num_iwarp_msix = 1; in i40e_init_msix()
10953 v_budget += pf->num_iwarp_msix; in i40e_init_msix()
10954 vectors_left -= pf->num_iwarp_msix; in i40e_init_msix()
10958 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { in i40e_init_msix()
10960 pf->num_vmdq_msix = 0; in i40e_init_msix()
10961 pf->num_vmdq_qps = 0; in i40e_init_msix()
10964 pf->num_vmdq_vsis * pf->num_vmdq_qps; in i40e_init_msix()
10971 * queues/vectors used by the PF later with the ethtool in i40e_init_msix()
10975 pf->num_vmdq_qps = 1; in i40e_init_msix()
10976 vmdq_vecs_wanted = pf->num_vmdq_vsis; in i40e_init_msix()
10981 pf->num_vmdq_msix = pf->num_vmdq_qps; in i40e_init_msix()
10997 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); in i40e_init_msix()
10998 pf->num_lan_msix += extra_vectors; in i40e_init_msix()
11004 v_budget += pf->num_lan_msix; in i40e_init_msix()
11005 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), in i40e_init_msix()
11007 if (!pf->msix_entries) in i40e_init_msix()
11011 pf->msix_entries[i].entry = i; in i40e_init_msix()
11012 v_actual = i40e_reserve_msix_vectors(pf, v_budget); in i40e_init_msix()
11015 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; in i40e_init_msix()
11016 kfree(pf->msix_entries); in i40e_init_msix()
11017 pf->msix_entries = NULL; in i40e_init_msix()
11018 pci_disable_msix(pf->pdev); in i40e_init_msix()
11023 pf->num_vmdq_vsis = 0; in i40e_init_msix()
11024 pf->num_vmdq_qps = 0; in i40e_init_msix()
11025 pf->num_lan_qps = 1; in i40e_init_msix()
11026 pf->num_lan_msix = 1; in i40e_init_msix()
11036 dev_info(&pf->pdev->dev, in i40e_init_msix()
11043 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ in i40e_init_msix()
11044 pf->num_vmdq_vsis = 1; in i40e_init_msix()
11045 pf->num_vmdq_qps = 1; in i40e_init_msix()
11050 pf->num_lan_msix = 1; in i40e_init_msix()
11053 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11054 pf->num_lan_msix = 1; in i40e_init_msix()
11055 pf->num_iwarp_msix = 1; in i40e_init_msix()
11057 pf->num_lan_msix = 2; in i40e_init_msix()
11061 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_init_msix()
11062 pf->num_iwarp_msix = min_t(int, (vec / 3), in i40e_init_msix()
11064 pf->num_vmdq_vsis = min_t(int, (vec / 3), in i40e_init_msix()
11067 pf->num_vmdq_vsis = min_t(int, (vec / 2), in i40e_init_msix()
11070 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_init_msix()
11071 pf->num_fdsb_msix = 1; in i40e_init_msix()
11074 pf->num_lan_msix = min_t(int, in i40e_init_msix()
11075 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), in i40e_init_msix()
11076 pf->num_lan_msix); in i40e_init_msix()
11077 pf->num_lan_qps = pf->num_lan_msix; in i40e_init_msix()
11082 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_init_msix()
11083 (pf->num_fdsb_msix == 0)) { in i40e_init_msix()
11084 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11085 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_init_msix()
11086 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_msix()
11088 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_init_msix()
11089 (pf->num_vmdq_msix == 0)) { in i40e_init_msix()
11090 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11091 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; in i40e_init_msix()
11094 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && in i40e_init_msix()
11095 (pf->num_iwarp_msix == 0)) { in i40e_init_msix()
11096 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11097 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_init_msix()
11099 i40e_debug(&pf->hw, I40E_DEBUG_INIT, in i40e_init_msix()
11100 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", in i40e_init_msix()
11101 pf->num_lan_msix, in i40e_init_msix()
11102 pf->num_vmdq_msix * pf->num_vmdq_vsis, in i40e_init_msix()
11103 pf->num_fdsb_msix, in i40e_init_msix()
11104 pf->num_iwarp_msix); in i40e_init_msix()
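The MSI-X setup above carves one hardware vector budget into shares: one vector for miscellaneous causes, LAN capped at min(online CPUs, half of what remains), then sideband flow director, iWARP and VMDq, with fallbacks when the PCI core grants fewer vectors. A much-simplified standalone model of the initial split (the policy constants are ours, not the driver's):

    #include <stdio.h>

    struct vec_budget {
        int misc, lan, fdsb, iwarp;
    };

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* One "other causes" vector first, LAN capped at min(cpus, half the
     * remainder), then one for flow-director sideband, the rest to iWARP. */
    static struct vec_budget split_vectors(int hw_vectors, int cpus)
    {
        struct vec_budget b = { 0, 0, 0, 0 };
        int left = hw_vectors;

        b.misc = 1;                       left -= b.misc;
        b.lan  = min_int(cpus, left / 2); left -= b.lan;
        if (left > 0) { b.fdsb = 1;       left -= 1; }
        b.iwarp = left > 0 ? left : 0;
        return b;
    }

    int main(void)
    {
        struct vec_budget b = split_vectors(16, 8);

        printf("misc=%d lan=%d fdsb=%d iwarp=%d\n",
               b.misc, b.lan, b.fdsb, b.iwarp);
        return 0;
    }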
11148 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors() local
11152 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_vsi_alloc_q_vectors()
11154 else if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_alloc_q_vectors()
11176 * @pf: board private structure to initialize
11178 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) in i40e_init_interrupt_scheme() argument
11183 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_init_interrupt_scheme()
11184 vectors = i40e_init_msix(pf); in i40e_init_interrupt_scheme()
11186 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | in i40e_init_interrupt_scheme()
11195 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_init_interrupt_scheme()
11198 i40e_determine_queue_usage(pf); in i40e_init_interrupt_scheme()
11202 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_init_interrupt_scheme()
11203 (pf->flags & I40E_FLAG_MSI_ENABLED)) { in i40e_init_interrupt_scheme()
11204 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); in i40e_init_interrupt_scheme()
11205 vectors = pci_enable_msi(pf->pdev); in i40e_init_interrupt_scheme()
11207 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", in i40e_init_interrupt_scheme()
11209 pf->flags &= ~I40E_FLAG_MSI_ENABLED; in i40e_init_interrupt_scheme()
11214 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) in i40e_init_interrupt_scheme()
11215 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); in i40e_init_interrupt_scheme()
11219 pf->irq_pile = kzalloc(size, GFP_KERNEL); in i40e_init_interrupt_scheme()
11220 if (!pf->irq_pile) in i40e_init_interrupt_scheme()
11223 pf->irq_pile->num_entries = vectors; in i40e_init_interrupt_scheme()
11224 pf->irq_pile->search_hint = 0; in i40e_init_interrupt_scheme()
11227 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); in i40e_init_interrupt_scheme()
11234 * @pf: private board data structure
11240 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) in i40e_restore_interrupt_scheme() argument
11248 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); in i40e_restore_interrupt_scheme()
11250 err = i40e_init_interrupt_scheme(pf); in i40e_restore_interrupt_scheme()
11257 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_restore_interrupt_scheme()
11258 if (pf->vsi[i]) { in i40e_restore_interrupt_scheme()
11259 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
11262 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
11266 err = i40e_setup_misc_vector(pf); in i40e_restore_interrupt_scheme()
11270 if (pf->flags & I40E_FLAG_IWARP_ENABLED) in i40e_restore_interrupt_scheme()
11271 i40e_client_update_msix_info(pf); in i40e_restore_interrupt_scheme()
11277 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
11278 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
11287 * @pf: board private structure
11294 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) in i40e_setup_misc_vector_for_recovery_mode() argument
11298 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_setup_misc_vector_for_recovery_mode()
11299 err = i40e_setup_misc_vector(pf); in i40e_setup_misc_vector_for_recovery_mode()
11302 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
11308 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; in i40e_setup_misc_vector_for_recovery_mode()
11310 err = request_irq(pf->pdev->irq, i40e_intr, flags, in i40e_setup_misc_vector_for_recovery_mode()
11311 pf->int_name, pf); in i40e_setup_misc_vector_for_recovery_mode()
11314 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
11319 i40e_enable_misc_int_causes(pf); in i40e_setup_misc_vector_for_recovery_mode()
11320 i40e_irq_dynamic_enable_icr0(pf); in i40e_setup_misc_vector_for_recovery_mode()
11328 * @pf: board private structure
11334 static int i40e_setup_misc_vector(struct i40e_pf *pf) in i40e_setup_misc_vector() argument
11336 struct i40e_hw *hw = &pf->hw; in i40e_setup_misc_vector()
11340 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { in i40e_setup_misc_vector()
11341 err = request_irq(pf->msix_entries[0].vector, in i40e_setup_misc_vector()
11342 i40e_intr, 0, pf->int_name, pf); in i40e_setup_misc_vector()
11344 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_setup_misc_vector()
11345 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector()
11347 pf->int_name, err); in i40e_setup_misc_vector()
11352 i40e_enable_misc_int_causes(pf); in i40e_setup_misc_vector()
11360 i40e_irq_dynamic_enable_icr0(pf); in i40e_setup_misc_vector()
11377 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq() local
11378 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_aq()
11385 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
11387 i40e_stat_str(&pf->hw, ret), in i40e_get_rss_aq()
11388 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
11389 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
11399 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
11401 i40e_stat_str(&pf->hw, ret), in i40e_get_rss_aq()
11402 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
11403 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
11423 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg() local
11424 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_reg()
11439 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); in i40e_config_rss_reg()
11457 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_config_rss_reg()
11477 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg() local
11478 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_reg()
11510 struct i40e_pf *pf = vsi->back; in i40e_config_rss() local
11512 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_config_rss()
11529 struct i40e_pf *pf = vsi->back; in i40e_get_rss() local
11531 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) in i40e_get_rss()
11539 * @pf: Pointer to board private structure
11544 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, in i40e_fill_rss_lut() argument
11555 * @pf: board private structure
11557 static int i40e_pf_config_rss(struct i40e_pf *pf) in i40e_pf_config_rss() argument
11559 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_pf_config_rss()
11562 struct i40e_hw *hw = &pf->hw; in i40e_pf_config_rss()
11570 hena |= i40e_pf_get_default_rss_hena(pf); in i40e_pf_config_rss()
11577 reg_val = (pf->rss_table_size == 512) ? in i40e_pf_config_rss()
11592 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
11605 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
11622 * @pf: board private structure
11629 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) in i40e_reconfig_rss_queues() argument
11631 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_reconfig_rss_queues()
11634 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_reconfig_rss_queues()
11638 new_rss_size = min_t(int, queue_count, pf->rss_size_max); in i40e_reconfig_rss_queues()
11644 i40e_prep_for_reset(pf, true); in i40e_reconfig_rss_queues()
11646 pf->alloc_rss_size = new_rss_size; in i40e_reconfig_rss_queues()
11648 i40e_reset_and_rebuild(pf, true, true); in i40e_reconfig_rss_queues()
11655 dev_dbg(&pf->pdev->dev, in i40e_reconfig_rss_queues()
11661 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
11663 i40e_pf_config_rss(pf); in i40e_reconfig_rss_queues()
11665 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", in i40e_reconfig_rss_queues()
11666 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
11667 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
11671 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11672 * @pf: board private structure
11674 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) in i40e_get_partition_bw_setting() argument
11680 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, in i40e_get_partition_bw_setting()
11685 pf->min_bw = min_bw; in i40e_get_partition_bw_setting()
11687 pf->max_bw = max_bw; in i40e_get_partition_bw_setting()
11694 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11695 * @pf: board private structure
11697 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) in i40e_set_partition_bw_setting() argument
11702 /* Set the valid bit for this PF */ in i40e_set_partition_bw_setting()
11703 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); in i40e_set_partition_bw_setting()
11704 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
11705 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
11708 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); in i40e_set_partition_bw_setting()
11714 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11715 * @pf: board private structure
11717 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) in i40e_commit_partition_bw_setting() argument
11724 if (pf->hw.partition_id != 1) { in i40e_commit_partition_bw_setting()
11725 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
11727 pf->hw.partition_id); in i40e_commit_partition_bw_setting()
11733 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); in i40e_commit_partition_bw_setting()
11734 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
11736 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
11738 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
11739 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
11744 ret = i40e_aq_read_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
11751 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
11752 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
11754 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", in i40e_commit_partition_bw_setting()
11755 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
11756 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
11764 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); in i40e_commit_partition_bw_setting()
11765 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
11767 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
11769 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
11770 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
11777 ret = i40e_aq_update_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
11784 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
11785 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
11787 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
11789 i40e_stat_str(&pf->hw, ret), in i40e_commit_partition_bw_setting()
11790 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
11798 * if total port shutdown feature is enabled for this PF
11799 * @pf: board private structure
11801 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) in i40e_is_total_port_shutdown_enabled() argument
11816 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
11821 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
11828 read_status = i40e_read_nvm_module_data(&pf->hw, in i40e_is_total_port_shutdown_enabled()
11836 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); in i40e_is_total_port_shutdown_enabled()
11842 dev_warn(&pf->pdev->dev, in i40e_is_total_port_shutdown_enabled()
11844 i40e_stat_str(&pf->hw, read_status)); in i40e_is_total_port_shutdown_enabled()
11850 * @pf: board private structure to initialize
11856 static int i40e_sw_init(struct i40e_pf *pf) in i40e_sw_init() argument
11862 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | in i40e_sw_init()
11867 pf->rx_itr_default = I40E_ITR_RX_DEF; in i40e_sw_init()
11868 pf->tx_itr_default = I40E_ITR_TX_DEF; in i40e_sw_init()
11870 /* Depending on PF configurations, it is possible that the RSS in i40e_sw_init()
11873 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); in i40e_sw_init()
11874 pf->alloc_rss_size = 1; in i40e_sw_init()
11875 pf->rss_table_size = pf->hw.func_caps.rss_table_size; in i40e_sw_init()
11876 pf->rss_size_max = min_t(int, pf->rss_size_max, in i40e_sw_init()
11877 pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
11878 if (pf->hw.func_caps.rss) { in i40e_sw_init()
11879 pf->flags |= I40E_FLAG_RSS_ENABLED; in i40e_sw_init()
11880 pf->alloc_rss_size = min_t(int, pf->rss_size_max, in i40e_sw_init()
11885 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { in i40e_sw_init()
11886 pf->flags |= I40E_FLAG_MFP_ENABLED; in i40e_sw_init()
11887 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); in i40e_sw_init()
11888 if (i40e_get_partition_bw_setting(pf)) { in i40e_sw_init()
11889 dev_warn(&pf->pdev->dev, in i40e_sw_init()
11892 dev_info(&pf->pdev->dev, in i40e_sw_init()
11894 pf->min_bw, pf->max_bw); in i40e_sw_init()
11897 i40e_set_partition_bw_setting(pf); in i40e_sw_init()
11901 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || in i40e_sw_init()
11902 (pf->hw.func_caps.fd_filters_best_effort > 0)) { in i40e_sw_init()
11903 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; in i40e_sw_init()
11904 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; in i40e_sw_init()
11905 if (pf->flags & I40E_FLAG_MFP_ENABLED && in i40e_sw_init()
11906 pf->hw.num_partitions > 1) in i40e_sw_init()
11907 dev_info(&pf->pdev->dev, in i40e_sw_init()
11910 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_sw_init()
11911 pf->fdir_pf_filter_count = in i40e_sw_init()
11912 pf->hw.func_caps.fd_filters_guaranteed; in i40e_sw_init()
11913 pf->hw.fdir_shared_filter_count = in i40e_sw_init()
11914 pf->hw.func_caps.fd_filters_best_effort; in i40e_sw_init()
11917 if (pf->hw.mac.type == I40E_MAC_X722) { in i40e_sw_init()
11918 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | in i40e_sw_init()
11931 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != in i40e_sw_init()
11933 dev_warn(&pf->pdev->dev, in i40e_sw_init()
11935 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; in i40e_sw_init()
11937 } else if ((pf->hw.aq.api_maj_ver > 1) || in i40e_sw_init()
11938 ((pf->hw.aq.api_maj_ver == 1) && in i40e_sw_init()
11939 (pf->hw.aq.api_min_ver > 4))) { in i40e_sw_init()
11941 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; in i40e_sw_init()
11945 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) in i40e_sw_init()
11946 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; in i40e_sw_init()
11948 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
11949 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || in i40e_sw_init()
11950 (pf->hw.aq.fw_maj_ver < 4))) { in i40e_sw_init()
11951 pf->hw_features |= I40E_HW_RESTART_AUTONEG; in i40e_sw_init()
11953 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; in i40e_sw_init()
11957 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
11958 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || in i40e_sw_init()
11959 (pf->hw.aq.fw_maj_ver < 4))) in i40e_sw_init()
11960 pf->hw_features |= I40E_HW_STOP_FW_LLDP; in i40e_sw_init()
11963 if ((pf->hw.mac.type == I40E_MAC_XL710) && in i40e_sw_init()
11964 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || in i40e_sw_init()
11965 (pf->hw.aq.fw_maj_ver >= 5))) in i40e_sw_init()
11966 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; in i40e_sw_init()
11969 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
11970 pf->hw.aq.fw_maj_ver >= 6) in i40e_sw_init()
11971 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; in i40e_sw_init()
11973 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { in i40e_sw_init()
11974 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; in i40e_sw_init()
11975 pf->flags |= I40E_FLAG_VMDQ_ENABLED; in i40e_sw_init()
11976 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); in i40e_sw_init()
11979 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { in i40e_sw_init()
11980 pf->flags |= I40E_FLAG_IWARP_ENABLED; in i40e_sw_init()
11982 pf->num_iwarp_msix = (int)num_online_cpus() + 1; in i40e_sw_init()
11989 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
11990 pf->hw.func_caps.npar_enable && in i40e_sw_init()
11991 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) in i40e_sw_init()
11992 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; in i40e_sw_init()
11995 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { in i40e_sw_init()
11996 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; in i40e_sw_init()
11997 pf->flags |= I40E_FLAG_SRIOV_ENABLED; in i40e_sw_init()
11998 pf->num_req_vfs = min_t(int, in i40e_sw_init()
11999 pf->hw.func_caps.num_vfs, in i40e_sw_init()
12003 pf->eeprom_version = 0xDEAD; in i40e_sw_init()
12004 pf->lan_veb = I40E_NO_VEB; in i40e_sw_init()
12005 pf->lan_vsi = I40E_NO_VSI; in i40e_sw_init()
12008 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; in i40e_sw_init()
12012 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12013 pf->qp_pile = kzalloc(size, GFP_KERNEL); in i40e_sw_init()
12014 if (!pf->qp_pile) { in i40e_sw_init()
12018 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; in i40e_sw_init()
12019 pf->qp_pile->search_hint = 0; in i40e_sw_init()
12021 pf->tx_timeout_recovery_level = 1; in i40e_sw_init()
12023 if (pf->hw.mac.type != I40E_MAC_X722 && in i40e_sw_init()
12024 i40e_is_total_port_shutdown_enabled(pf)) { in i40e_sw_init()
12028 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | in i40e_sw_init()
12030 dev_info(&pf->pdev->dev, in i40e_sw_init()
12033 mutex_init(&pf->switch_mutex); in i40e_sw_init()
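
The i40e_sw_init fragments above gate several hw_features on the XL710's AdminQ firmware version: restart-autoneg and no-DCB below FW 4.33, stop-FW-LLDP below FW 4.3, Set-LLDP-MIB from FW 4.40 or any later major, and L4 PTP from FW 6. A minimal standalone sketch of that version gating, using illustrative stand-in types and flag names rather than the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the AdminQ firmware version fields checked above. */
    struct fw_ver { uint16_t maj; uint16_t min; };

    enum {
        FEAT_RESTART_AUTONEG  = 1 << 0,   /* models I40E_HW_RESTART_AUTONEG */
        FEAT_NO_DCB           = 1 << 1,   /* models I40E_HW_NO_DCB_SUPPORT */
        FEAT_STOP_FW_LLDP     = 1 << 2,   /* models I40E_HW_STOP_FW_LLDP */
        FEAT_USE_SET_LLDP_MIB = 1 << 3,   /* models I40E_HW_USE_SET_LLDP_MIB */
        FEAT_PTP_L4           = 1 << 4,   /* models I40E_HW_PTP_L4_CAPABLE */
    };

    static bool fw_older_than(struct fw_ver fw, uint16_t maj, uint16_t min)
    {
        return fw.maj < maj || (fw.maj == maj && fw.min < min);
    }

    static uint32_t xl710_fw_features(struct fw_ver fw)
    {
        uint32_t features = 0;

        if (fw_older_than(fw, 4, 33))
            features |= FEAT_RESTART_AUTONEG | FEAT_NO_DCB;
        if (fw_older_than(fw, 4, 3))
            features |= FEAT_STOP_FW_LLDP;
        if (!fw_older_than(fw, 4, 40))    /* 4.40+ or any later major */
            features |= FEAT_USE_SET_LLDP_MIB;
        if (fw.maj >= 6)
            features |= FEAT_PTP_L4;
        return features;
    }
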
12041 * @pf: board private structure to initialize
12046 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) in i40e_set_ntuple() argument
12055 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) in i40e_set_ntuple()
12060 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { in i40e_set_ntuple()
12061 pf->flags |= I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12062 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12066 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_set_ntuple()
12068 i40e_fdir_filter_exit(pf); in i40e_set_ntuple()
12070 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_set_ntuple()
12071 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); in i40e_set_ntuple()
12072 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_set_ntuple()
12075 pf->fd_add_err = 0; in i40e_set_ntuple()
12076 pf->fd_atr_cnt = 0; in i40e_set_ntuple()
12078 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_set_ntuple()
12079 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_set_ntuple()
12080 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_set_ntuple()
12081 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); in i40e_set_ntuple()
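
i40e_set_ntuple, listed above, flips the sideband Flow Director state when the NETIF_F_NTUPLE feature is toggled: enabling it (only when an FD MSI-X vector exists and no cloud filters are installed) sets the FD_SB flag, while disabling it flushes the existing FD filters and clears the auto-disable state; either transition tells the caller a reset is needed. A condensed userspace model of those two branches, with illustrative names rather than the driver's flags:

    #include <stdbool.h>

    struct fd_state {
        bool sb_enabled;     /* models I40E_FLAG_FD_SB_ENABLED */
        bool sb_inactive;    /* models I40E_FLAG_FD_SB_INACTIVE */
        bool auto_disabled;  /* models __I40E_FD_SB_AUTO_DISABLED */
    };

    static bool set_ntuple(struct fd_state *fd, bool want_ntuple,
                           bool have_fdsb_vector, bool have_cloud_filters)
    {
        bool need_reset = false;

        if (want_ntuple) {
            if (!fd->sb_enabled)
                need_reset = true;            /* turning sideband FD on */
            if (have_fdsb_vector && !have_cloud_filters) {
                fd->sb_enabled = true;
                fd->sb_inactive = false;
            }
        } else {
            if (fd->sb_enabled)
                need_reset = true;            /* filters must be flushed */
            fd->sb_enabled = false;
            fd->sb_inactive = true;
            fd->auto_disabled = false;
        }
        return need_reset;
    }

In i40e_set_features the returned need_reset is what drives the i40e_do_reset(pf, I40E_PF_RESET_FLAG, true) call a few entries further down the listing.
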
12092 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut() local
12093 struct i40e_hw *hw = &pf->hw; in i40e_clear_rss_lut()
12104 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_clear_rss_lut()
12119 struct i40e_pf *pf = vsi->back; in i40e_set_features() local
12123 i40e_pf_config_rss(pf); in i40e_set_features()
12133 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) { in i40e_set_features()
12134 dev_err(&pf->pdev->dev, in i40e_set_features()
12142 need_reset = i40e_set_ntuple(pf, features); in i40e_set_features()
12145 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_set_features()
12198 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id() local
12199 struct i40e_hw *hw = &pf->hw; in i40e_get_phys_port_id()
12201 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) in i40e_get_phys_port_id()
12227 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add() local
12230 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) in i40e_ndo_fdb_add()
12270 * is to change the mode then that requires a PF reset to
12283 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink() local
12288 /* Only for PF VSI for now */ in i40e_ndo_bridge_setlink()
12289 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_setlink()
12292 /* Find the HW bridge for PF VSI */ in i40e_ndo_bridge_setlink()
12294 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_setlink()
12295 veb = pf->veb[i]; in i40e_ndo_bridge_setlink()
12313 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
12328 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
12330 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_ndo_bridge_setlink()
12331 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); in i40e_ndo_bridge_setlink()
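
As the comment fragment in i40e_ndo_bridge_setlink notes, changing the bridge mode on an existing HW bridge requires a PF reset: the VEB-mode flag is flipped and the reset rebuilds the switch in the new mode (when no bridge exists yet, one is created with i40e_veb_setup instead). A simplified model of the mode-change branch only, with stand-in names:

    #include <stdbool.h>

    struct sw_ctx { bool veb_mode_enabled; };

    /* Stand-in for i40e_do_reset(pf, I40E_PF_RESET_FLAG, true). */
    static void request_pf_reset(struct sw_ctx *sw) { (void)sw; }

    static void bridge_mode_change(struct sw_ctx *sw, bool want_veb)
    {
        if (sw->veb_mode_enabled == want_veb)
            return;                       /* mode already matches */

        sw->veb_mode_enabled = want_veb;  /* flip the flag first */
        request_pf_reset(sw);             /* switch is rebuilt in the new mode */
    }
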
12358 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink() local
12362 /* Only for PF VSI for now */ in i40e_ndo_bridge_getlink()
12363 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_getlink()
12366 /* Find the HW bridge for the PF VSI */ in i40e_ndo_bridge_getlink()
12368 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_getlink()
12369 veb = pf->veb[i]; in i40e_ndo_bridge_getlink()
12446 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup() local
12462 i40e_prep_for_reset(pf, true); in i40e_xdp_setup()
12470 i40e_reset_and_rebuild(pf, true, true); in i40e_xdp_setup()
12499 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf() local
12502 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_enter_busy_conf()
12518 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf() local
12520 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_exit_busy_conf()
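
i40e_enter_busy_conf/i40e_exit_busy_conf above are a small ownership handshake on the __I40E_CONFIG_BUSY bit: spin on test_and_set_bit with a bounded retry count, sleeping briefly between attempts, and clear the bit on exit. The same pattern as a standalone C11 model:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag config_busy = ATOMIC_FLAG_INIT;   /* models __I40E_CONFIG_BUSY */

    /* Returns true once ownership is acquired, false when the retry budget
     * runs out (the driver returns -EBUSY in that case). */
    static bool enter_busy_conf(int retries)
    {
        while (atomic_flag_test_and_set(&config_busy)) {
            if (--retries <= 0)
                return false;
            /* the driver sleeps for a short interval here before retrying */
        }
        return true;
    }

    static void exit_busy_conf(void)
    {
        atomic_flag_clear(&config_busy);
    }
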
12593 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings() local
12597 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
12600 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
12606 i40e_control_rx_q(pf, pf_q, enable); in i40e_queue_pair_toggle_rings()
12607 ret = i40e_pf_rxq_wait(pf, pf_q, enable); in i40e_queue_pair_toggle_rings()
12609 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
12624 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
12628 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
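
The i40e_queue_pair_toggle_rings fragments show the ordering used when a queue pair is enabled or disabled: the Tx queue is toggled and waited on first, then the Rx queue, then the XDP Tx queue if one is attached, with a dev_info() on any timeout. A sketch of that ordering; every type and helper below is a placeholder, not a driver symbol:

    #include <stdbool.h>

    struct ring { int id; };
    struct qp   { struct ring *tx, *rx, *xdp_tx; };

    /* Placeholders standing in for i40e_control_wait_tx_q(),
     * i40e_control_rx_q() and i40e_pf_rxq_wait(); they only model success. */
    static int  wait_tx_queue(struct ring *r, bool en)    { (void)r; (void)en; return 0; }
    static void control_rx_queue(struct ring *r, bool en) { (void)r; (void)en; }
    static int  wait_rx_queue(struct ring *r, bool en)    { (void)r; (void)en; return 0; }

    static int toggle_queue_pair(struct qp *q, bool enable)
    {
        int ret;

        ret = wait_tx_queue(q->tx, enable);      /* Tx first */
        if (ret)
            return ret;

        control_rx_queue(q->rx, enable);         /* then Rx ... */
        ret = wait_rx_queue(q->rx, enable);      /* ... and wait on it */
        if (ret)
            return ret;

        /* finally the XDP Tx ring, when XDP is attached */
        return q->xdp_tx ? wait_tx_queue(q->xdp_tx, enable) : 0;
    }
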
12644 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq() local
12645 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_enable_irq()
12648 if (pf->flags & I40E_FLAG_MSIX_ENABLED) in i40e_queue_pair_enable_irq()
12651 i40e_irq_dynamic_enable_icr0(pf); in i40e_queue_pair_enable_irq()
12664 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq() local
12665 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_disable_irq()
12673 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_queue_pair_disable_irq()
12678 synchronize_irq(pf->msix_entries[intpf].vector); in i40e_queue_pair_disable_irq()
12684 synchronize_irq(pf->pdev->irq); in i40e_queue_pair_disable_irq()
12819 struct i40e_pf *pf = vsi->back; in i40e_config_netdev() local
12820 struct i40e_hw *hw = &pf->hw; in i40e_config_netdev()
12859 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) in i40e_config_netdev()
12862 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; in i40e_config_netdev()
12878 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) in i40e_config_netdev()
12887 SET_NETDEV_DEV(netdev, &pf->pdev->dev); in i40e_config_netdev()
12911 pf->vsi[pf->lan_vsi]->netdev->name); in i40e_config_netdev()
12983 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb() local
12989 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
12991 dev_info(&pf->pdev->dev, in i40e_is_vsi_uplink_mode_veb()
13018 struct i40e_pf *pf = vsi->back; in i40e_add_vsi() local
13019 struct i40e_hw *hw = &pf->hw; in i40e_add_vsi()
13031 /* The PF's main VSI is already setup as part of the in i40e_add_vsi()
13036 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13037 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13039 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_add_vsi()
13042 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13043 "couldn't get PF vsi config, err %s aq_err %s\n", in i40e_add_vsi()
13044 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13045 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13046 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13055 enabled_tc = i40e_pf_get_tc_map(pf); in i40e_add_vsi()
13061 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { in i40e_add_vsi()
13063 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13064 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13072 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13074 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13075 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13076 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13083 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && in i40e_add_vsi()
13084 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ in i40e_add_vsi()
13086 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13087 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13092 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13094 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13095 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13096 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13107 * For MFP case the iSCSI PF would use this in i40e_add_vsi()
13115 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13118 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13119 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13120 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13131 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && in i40e_add_vsi()
13189 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
13213 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13214 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13215 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13237 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_add_vsi()
13243 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13245 i40e_stat_str(&pf->hw, ret), in i40e_add_vsi()
13246 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13266 struct i40e_pf *pf; in i40e_vsi_release() local
13270 pf = vsi->back; in i40e_vsi_release()
13274 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", in i40e_vsi_release()
13278 if (vsi == pf->vsi[pf->lan_vsi] && in i40e_vsi_release()
13279 !test_bit(__I40E_DOWN, pf->state)) { in i40e_vsi_release()
13280 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); in i40e_vsi_release()
13331 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_release()
13332 if (pf->vsi[i] && in i40e_vsi_release()
13333 pf->vsi[i]->uplink_seid == uplink_seid && in i40e_vsi_release()
13334 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
13339 if (!pf->veb[i]) in i40e_vsi_release()
13341 if (pf->veb[i]->uplink_seid == uplink_seid) in i40e_vsi_release()
13343 if (pf->veb[i]->seid == uplink_seid) in i40e_vsi_release()
13344 veb = pf->veb[i]; in i40e_vsi_release()
13365 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors() local
13368 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", in i40e_vsi_setup_vectors()
13374 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", in i40e_vsi_setup_vectors()
13381 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
13391 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_vsi_setup_vectors()
13394 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
13397 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
13421 struct i40e_pf *pf; in i40e_vsi_reinit_setup() local
13428 pf = vsi->back; in i40e_vsi_reinit_setup()
13430 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
13442 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
13444 dev_info(&pf->pdev->dev, in i40e_vsi_reinit_setup()
13454 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_vsi_reinit_setup()
13455 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
13456 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_vsi_reinit_setup()
13457 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_vsi_reinit_setup()
13459 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
13478 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
13486 * @pf: board private structure
13497 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, in i40e_vsi_setup() argument
13507 * - the PF's port seid in i40e_vsi_setup()
13508 * no VEB is needed because this is the PF in i40e_vsi_setup()
13514 * - seid of the PF VSI, which is what creates the first VEB in i40e_vsi_setup()
13520 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { in i40e_vsi_setup()
13521 veb = pf->veb[i]; in i40e_vsi_setup()
13526 if (!veb && uplink_seid != pf->mac_seid) { in i40e_vsi_setup()
13528 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_setup()
13529 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { in i40e_vsi_setup()
13530 vsi = pf->vsi[i]; in i40e_vsi_setup()
13535 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", in i40e_vsi_setup()
13540 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
13541 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
13544 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
13547 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { in i40e_vsi_setup()
13556 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { in i40e_vsi_setup()
13558 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; in i40e_vsi_setup()
13563 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_vsi_setup()
13564 veb = pf->veb[i]; in i40e_vsi_setup()
13567 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); in i40e_vsi_setup()
13576 v_idx = i40e_vsi_mem_alloc(pf, type); in i40e_vsi_setup()
13579 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
13586 pf->lan_vsi = v_idx; in i40e_vsi_setup()
13593 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
13595 dev_info(&pf->pdev->dev, in i40e_vsi_setup()
13645 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && in i40e_vsi_setup()
13661 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
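
In i40e_vsi_setup the uplink SEID decides whether a VEB is involved: a VSI hung directly off the MAC needs no VEB, otherwise the VEB already sitting on that uplink is reused, or a new one is created and the owning VSI is re-parented to it. A condensed lookup model with simplified types (the creation path is left out):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_VEB 16                        /* illustrative table size */

    struct veb    { uint16_t seid; };
    struct sw_cfg { uint16_t mac_seid; struct veb *veb[MAX_VEB]; };

    /* NULL with *needs_veb == false: attach directly to the MAC.
     * NULL with *needs_veb == true: no VEB found, the driver would create one. */
    static struct veb *resolve_uplink(struct sw_cfg *sw, uint16_t uplink_seid,
                                      bool *needs_veb)
    {
        int i;

        *needs_veb = (uplink_seid != sw->mac_seid);
        if (!*needs_veb)
            return NULL;

        for (i = 0; i < MAX_VEB; i++)
            if (sw->veb[i] && sw->veb[i]->seid == uplink_seid)
                return sw->veb[i];

        return NULL;
    }
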
13678 struct i40e_pf *pf = veb->pf; in i40e_veb_get_bw_info() local
13679 struct i40e_hw *hw = &pf->hw; in i40e_veb_get_bw_info()
13687 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
13689 i40e_stat_str(&pf->hw, ret), in i40e_veb_get_bw_info()
13690 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
13697 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
13699 i40e_stat_str(&pf->hw, ret), in i40e_veb_get_bw_info()
13700 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
13722 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
13723 * @pf: board private structure
13726 * On success: returns vsi index in PF (positive)
13728 static int i40e_veb_mem_alloc(struct i40e_pf *pf) in i40e_veb_mem_alloc() argument
13734 /* Need to protect the allocation of switch elements at the PF level */ in i40e_veb_mem_alloc()
13735 mutex_lock(&pf->switch_mutex); in i40e_veb_mem_alloc()
13744 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) in i40e_veb_mem_alloc()
13756 veb->pf = pf; in i40e_veb_mem_alloc()
13760 pf->veb[i] = veb; in i40e_veb_mem_alloc()
13763 mutex_unlock(&pf->switch_mutex); in i40e_veb_mem_alloc()
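
i40e_veb_mem_alloc shows the slot-allocation pattern used for switch elements: under pf->switch_mutex, scan the fixed pf->veb[] table for the first free entry, store the new element there, and hand back the index as its handle. A standalone model of that pattern:

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_VEB 16                        /* illustrative table size */

    static pthread_mutex_t switch_mutex = PTHREAD_MUTEX_INITIALIZER;
    static void *veb_table[MAX_VEB];

    /* Returns the slot index on success, -1 when the table is full
     * (the driver returns a negative errno there). */
    static int veb_slot_alloc(void *veb)
    {
        int i = 0, ret = -1;

        pthread_mutex_lock(&switch_mutex);
        while (i < MAX_VEB && veb_table[i])
            i++;
        if (i < MAX_VEB) {
            veb_table[i] = veb;
            ret = i;                          /* index doubles as the handle */
        }
        pthread_mutex_unlock(&switch_mutex);
        return ret;
    }
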
13776 struct i40e_pf *pf = branch->pf; in i40e_switch_branch_release() local
13783 if (!pf->veb[i]) in i40e_switch_branch_release()
13785 if (pf->veb[i]->uplink_seid == branch->seid) in i40e_switch_branch_release()
13786 i40e_switch_branch_release(pf->veb[i]); in i40e_switch_branch_release()
13794 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_switch_branch_release()
13795 if (!pf->vsi[i]) in i40e_switch_branch_release()
13797 if (pf->vsi[i]->uplink_seid == branch_seid && in i40e_switch_branch_release()
13798 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
13799 i40e_vsi_release(pf->vsi[i]); in i40e_switch_branch_release()
13808 if (pf->veb[veb_idx]) in i40e_switch_branch_release()
13809 i40e_veb_release(pf->veb[veb_idx]); in i40e_switch_branch_release()
13821 if (veb->pf) { in i40e_veb_clear()
13822 struct i40e_pf *pf = veb->pf; in i40e_veb_clear() local
13824 mutex_lock(&pf->switch_mutex); in i40e_veb_clear()
13825 if (pf->veb[veb->idx] == veb) in i40e_veb_clear()
13826 pf->veb[veb->idx] = NULL; in i40e_veb_clear()
13827 mutex_unlock(&pf->switch_mutex); in i40e_veb_clear()
13840 struct i40e_pf *pf; in i40e_veb_release() local
13843 pf = veb->pf; in i40e_veb_release()
13846 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_veb_release()
13847 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { in i40e_veb_release()
13849 vsi = pf->vsi[i]; in i40e_veb_release()
13853 dev_info(&pf->pdev->dev, in i40e_veb_release()
13863 if (veb->uplink_seid == pf->mac_seid) in i40e_veb_release()
13869 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_veb_release()
13870 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; in i40e_veb_release()
13873 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_veb_release()
13884 struct i40e_pf *pf = veb->pf; in i40e_add_veb() local
13885 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); in i40e_add_veb()
13888 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, in i40e_add_veb()
13894 dev_info(&pf->pdev->dev, in i40e_add_veb()
13896 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
13897 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
13902 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, in i40e_add_veb()
13905 dev_info(&pf->pdev->dev, in i40e_add_veb()
13907 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
13908 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
13913 dev_info(&pf->pdev->dev, in i40e_add_veb()
13915 i40e_stat_str(&pf->hw, ret), in i40e_add_veb()
13916 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
13917 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_add_veb()
13930 * @pf: board private structure
13944 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, in i40e_veb_setup() argument
13955 dev_info(&pf->pdev->dev, in i40e_veb_setup()
13962 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) in i40e_veb_setup()
13963 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) in i40e_veb_setup()
13965 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { in i40e_veb_setup()
13966 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", in i40e_veb_setup()
13971 if (uplink_seid && uplink_seid != pf->mac_seid) { in i40e_veb_setup()
13973 if (pf->veb[veb_idx] && in i40e_veb_setup()
13974 pf->veb[veb_idx]->seid == uplink_seid) { in i40e_veb_setup()
13975 uplink_veb = pf->veb[veb_idx]; in i40e_veb_setup()
13980 dev_info(&pf->pdev->dev, in i40e_veb_setup()
13987 veb_idx = i40e_veb_mem_alloc(pf); in i40e_veb_setup()
13990 veb = pf->veb[veb_idx]; in i40e_veb_setup()
13997 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); in i40e_veb_setup()
14000 if (vsi_idx == pf->lan_vsi) in i40e_veb_setup()
14001 pf->lan_veb = veb->idx; in i40e_veb_setup()
14012 * i40e_setup_pf_switch_element - set PF vars based on switch type
14013 * @pf: board private structure
14020 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, in i40e_setup_pf_switch_element() argument
14030 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14036 pf->mac_seid = seid; in i40e_setup_pf_switch_element()
14040 if (uplink_seid != pf->mac_seid) in i40e_setup_pf_switch_element()
14042 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14047 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { in i40e_setup_pf_switch_element()
14048 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14052 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14053 v = i40e_veb_mem_alloc(pf); in i40e_setup_pf_switch_element()
14056 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14059 if (pf->lan_veb >= I40E_MAX_VEB) in i40e_setup_pf_switch_element()
14062 pf->veb[pf->lan_veb]->seid = seid; in i40e_setup_pf_switch_element()
14063 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; in i40e_setup_pf_switch_element()
14064 pf->veb[pf->lan_veb]->pf = pf; in i40e_setup_pf_switch_element()
14065 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; in i40e_setup_pf_switch_element()
14071 * the PF's VSI in i40e_setup_pf_switch_element()
14073 pf->mac_seid = uplink_seid; in i40e_setup_pf_switch_element()
14074 pf->pf_seid = downlink_seid; in i40e_setup_pf_switch_element()
14075 pf->main_vsi_seid = seid; in i40e_setup_pf_switch_element()
14077 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14079 pf->pf_seid, pf->main_vsi_seid); in i40e_setup_pf_switch_element()
14090 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", in i40e_setup_pf_switch_element()
14098 * @pf: board private structure
14104 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) in i40e_fetch_switch_configuration() argument
14120 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, in i40e_fetch_switch_configuration()
14124 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
14126 i40e_stat_str(&pf->hw, ret), in i40e_fetch_switch_configuration()
14127 i40e_aq_str(&pf->hw, in i40e_fetch_switch_configuration()
14128 pf->hw.aq.asq_last_status)); in i40e_fetch_switch_configuration()
14137 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
14145 i40e_setup_pf_switch_element(pf, ele, num_reported, in i40e_fetch_switch_configuration()
14156 * @pf: board private structure
14161 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) in i40e_setup_pf_switch() argument
14167 ret = i40e_fetch_switch_configuration(pf, false); in i40e_setup_pf_switch()
14169 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
14171 i40e_stat_str(&pf->hw, ret), in i40e_setup_pf_switch()
14172 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
14175 i40e_pf_reset_stats(pf); in i40e_setup_pf_switch()
14183 if ((pf->hw.pf_id == 0) && in i40e_setup_pf_switch()
14184 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { in i40e_setup_pf_switch()
14186 pf->last_sw_conf_flags = flags; in i40e_setup_pf_switch()
14189 if (pf->hw.pf_id == 0) { in i40e_setup_pf_switch()
14193 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, in i40e_setup_pf_switch()
14195 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_setup_pf_switch()
14196 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
14198 i40e_stat_str(&pf->hw, ret), in i40e_setup_pf_switch()
14199 i40e_aq_str(&pf->hw, in i40e_setup_pf_switch()
14200 pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
14203 pf->last_sw_conf_valid_flags = valid_flags; in i40e_setup_pf_switch()
14207 if (pf->lan_vsi == I40E_NO_VSI || reinit) { in i40e_setup_pf_switch()
14211 /* Set up the PF VSI associated with the PF's main VSI in i40e_setup_pf_switch()
14214 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_setup_pf_switch()
14215 uplink_seid = pf->veb[pf->lan_veb]->seid; in i40e_setup_pf_switch()
14217 uplink_seid = pf->mac_seid; in i40e_setup_pf_switch()
14218 if (pf->lan_vsi == I40E_NO_VSI) in i40e_setup_pf_switch()
14219 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); in i40e_setup_pf_switch()
14221 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
14223 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); in i40e_setup_pf_switch()
14224 i40e_cloud_filter_exit(pf); in i40e_setup_pf_switch()
14225 i40e_fdir_teardown(pf); in i40e_setup_pf_switch()
14230 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_setup_pf_switch()
14232 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
14233 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_setup_pf_switch()
14234 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_setup_pf_switch()
14236 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
14238 i40e_fdir_sb_setup(pf); in i40e_setup_pf_switch()
14240 /* Setup static PF queue filter control settings */ in i40e_setup_pf_switch()
14241 ret = i40e_setup_pf_filter_control(pf); in i40e_setup_pf_switch()
14243 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", in i40e_setup_pf_switch()
14251 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) in i40e_setup_pf_switch()
14252 i40e_pf_config_rss(pf); in i40e_setup_pf_switch()
14255 i40e_link_event(pf); in i40e_setup_pf_switch()
14258 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & in i40e_setup_pf_switch()
14261 i40e_ptp_init(pf); in i40e_setup_pf_switch()
14264 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); in i40e_setup_pf_switch()
14271 * @pf: board private structure
14273 static void i40e_determine_queue_usage(struct i40e_pf *pf) in i40e_determine_queue_usage() argument
14278 pf->num_lan_qps = 0; in i40e_determine_queue_usage()
14284 queues_left = pf->hw.func_caps.num_tx_qp; in i40e_determine_queue_usage()
14287 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { in i40e_determine_queue_usage()
14288 /* one qp for PF, no queues for anything else */ in i40e_determine_queue_usage()
14290 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
14293 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
14301 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
14302 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
14306 /* one qp for PF */ in i40e_determine_queue_usage()
14307 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
14308 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
14310 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | in i40e_determine_queue_usage()
14316 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
14319 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && in i40e_determine_queue_usage()
14321 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | in i40e_determine_queue_usage()
14323 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); in i40e_determine_queue_usage()
14327 q_max = max_t(int, pf->rss_size_max, num_online_cpus()); in i40e_determine_queue_usage()
14328 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); in i40e_determine_queue_usage()
14329 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); in i40e_determine_queue_usage()
14330 pf->num_lan_qps = q_max; in i40e_determine_queue_usage()
14332 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
14335 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_determine_queue_usage()
14339 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; in i40e_determine_queue_usage()
14340 pf->flags |= I40E_FLAG_FD_SB_INACTIVE; in i40e_determine_queue_usage()
14341 …dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n… in i40e_determine_queue_usage()
14345 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_determine_queue_usage()
14346 pf->num_vf_qps && pf->num_req_vfs && queues_left) { in i40e_determine_queue_usage()
14347 pf->num_req_vfs = min_t(int, pf->num_req_vfs, in i40e_determine_queue_usage()
14348 (queues_left / pf->num_vf_qps)); in i40e_determine_queue_usage()
14349 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); in i40e_determine_queue_usage()
14352 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && in i40e_determine_queue_usage()
14353 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { in i40e_determine_queue_usage()
14354 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, in i40e_determine_queue_usage()
14355 (queues_left / pf->num_vmdq_qps)); in i40e_determine_queue_usage()
14356 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); in i40e_determine_queue_usage()
14359 pf->queues_left = queues_left; in i40e_determine_queue_usage()
14360 dev_dbg(&pf->pdev->dev, in i40e_determine_queue_usage()
14362 pf->hw.func_caps.num_tx_qp, in i40e_determine_queue_usage()
14363 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), in i40e_determine_queue_usage()
14364 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, in i40e_determine_queue_usage()
14365 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, in i40e_determine_queue_usage()
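
i40e_determine_queue_usage carves the device's num_tx_qp budget up in a fixed priority order: the LAN VSI gets min(max(rss_size_max, online CPUs), HW queue and MSI-X limits), one queue is held back for the Flow Director sideband VSI when that feature stays enabled, and the remainder is split between the requested VFs and then the VMDq VSIs, shrinking those counts if too little is left. The arithmetic as a standalone sketch with illustrative names:

    struct queue_budget {
        int lan_qps, num_vfs, num_vmdq, left;
    };

    static int min_int(int a, int b) { return a < b ? a : b; }
    static int max_int(int a, int b) { return a > b ? a : b; }

    static void queue_usage(struct queue_budget *b,
                            int num_tx_qp, int num_msix,
                            int rss_size_max, int online_cpus, int want_fd_sb,
                            int req_vfs, int qps_per_vf,
                            int req_vmdq, int qps_per_vmdq)
    {
        b->left = num_tx_qp;

        /* LAN VSI: bounded by RSS sizing, CPU count and HW/MSI-X limits */
        b->lan_qps = min_int(min_int(max_int(rss_size_max, online_cpus),
                                     num_tx_qp), num_msix);
        b->left -= b->lan_qps;

        if (want_fd_sb && b->left > 1)
            b->left -= 1;          /* one queue reserved for the FD sideband VSI */

        /* VFs: shrink the request if the remainder cannot cover it */
        b->num_vfs = min_int(req_vfs, qps_per_vf ? b->left / qps_per_vf : 0);
        b->left -= b->num_vfs * qps_per_vf;

        /* VMDq VSIs take what is still left, with the same shrinking rule */
        b->num_vmdq = min_int(req_vmdq, qps_per_vmdq ? b->left / qps_per_vmdq : 0);
        b->left -= b->num_vmdq * qps_per_vmdq;
    }
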
14370 * i40e_setup_pf_filter_control - Setup PF static filter control
14371 * @pf: PF to be setup
14373 * i40e_setup_pf_filter_control sets up a PF's initial filter control
14374 * settings. If PE/FCoE are enabled then it will also set the per PF
14376 * ethertype and macvlan type filter settings for the pf.
14380 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) in i40e_setup_pf_filter_control() argument
14382 struct i40e_filter_control_settings *settings = &pf->filter_settings; in i40e_setup_pf_filter_control()
14387 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) in i40e_setup_pf_filter_control()
14390 /* Ethtype and MACVLAN filters enabled for PF */ in i40e_setup_pf_filter_control()
14394 if (i40e_set_filter_control(&pf->hw, settings)) in i40e_setup_pf_filter_control()
14402 static void i40e_print_features(struct i40e_pf *pf) in i40e_print_features() argument
14404 struct i40e_hw *hw = &pf->hw; in i40e_print_features()
14412 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); in i40e_print_features()
14414 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); in i40e_print_features()
14417 pf->hw.func_caps.num_vsis, in i40e_print_features()
14418 pf->vsi[pf->lan_vsi]->num_queue_pairs); in i40e_print_features()
14419 if (pf->flags & I40E_FLAG_RSS_ENABLED) in i40e_print_features()
14421 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) in i40e_print_features()
14423 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { in i40e_print_features()
14427 if (pf->flags & I40E_FLAG_DCB_CAPABLE) in i40e_print_features()
14431 if (pf->flags & I40E_FLAG_PTP) in i40e_print_features()
14433 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) in i40e_print_features()
14438 dev_info(&pf->pdev->dev, "%s\n", buf); in i40e_print_features()
14446 * @pf: board private structure
14453 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) in i40e_get_platform_mac_addr() argument
14455 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) in i40e_get_platform_mac_addr()
14456 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); in i40e_get_platform_mac_addr()
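
i40e_get_platform_mac_addr is a two-step fallback: ask the platform (e.g. device tree) for a MAC address first and only read the adapter's own address when none is provided. The same shape as a tiny sketch; both helpers are placeholders, not kernel APIs:

    #include <stdint.h>

    /* platform_mac() stands in for eth_platform_get_mac_address(),
     * adapter_mac() for i40e_get_mac_addr(); both fill mac[6]. */
    static int  platform_mac(uint8_t mac[6]) { (void)mac; return -1; /* none available */ }
    static void adapter_mac(uint8_t mac[6])  { for (int i = 0; i < 6; i++) mac[i] = (uint8_t)i; }

    static void get_port_mac(uint8_t mac[6])
    {
        if (platform_mac(mac))        /* non-zero: no platform-provided MAC */
            adapter_mac(mac);         /* fall back to the adapter's own address */
    }
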
14484 * @pf: board private structure
14491 static bool i40e_check_recovery_mode(struct i40e_pf *pf) in i40e_check_recovery_mode() argument
14493 u32 val = rd32(&pf->hw, I40E_GL_FWSTS); in i40e_check_recovery_mode()
14496 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); in i40e_check_recovery_mode()
14497 …dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for detai… in i40e_check_recovery_mode()
14498 set_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_check_recovery_mode()
14502 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_check_recovery_mode()
14503 …dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full … in i40e_check_recovery_mode()
14510 * @pf: board private structure
14519 * state is to issue a series of pf-resets and check a return value.
14520 * If a PF reset returns success then the firmware could be in recovery
14529 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) in i40e_pf_loop_reset() argument
14531 /* wait max 10 seconds for PF reset to succeed */ in i40e_pf_loop_reset()
14534 struct i40e_hw *hw = &pf->hw; in i40e_pf_loop_reset()
14544 pf->pfr_count++; in i40e_pf_loop_reset()
14546 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); in i40e_pf_loop_reset()
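
Per the comments above, i40e_pf_loop_reset keeps issuing PF resets inside a fixed time budget (the in-function comment gives a ten-second cap) and hands the outcome to i40e_handle_resets, next in the listing, which combines it with the EMP-reset check to decide whether recovery mode is being entered. A bounded-retry model of the loop itself; pf_reset() is a stand-in for the real reset call:

    #include <time.h>

    /* Stand-in for the hardware PF reset; here it always fails. */
    static int pf_reset(void) { return -1; }

    /* Retry until the reset succeeds or the time budget runs out;
     * the last failure is what gets reported to the caller. */
    static int pf_loop_reset(int budget_seconds)
    {
        time_t deadline = time(NULL) + budget_seconds;
        int ret;

        do {
            ret = pf_reset();
            if (ret == 0)
                return 0;
            /* the driver sleeps briefly between attempts */
        } while (time(NULL) < deadline);

        return ret;
    }
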
14553 * @pf: board private structure
14562 static bool i40e_check_fw_empr(struct i40e_pf *pf) in i40e_check_fw_empr() argument
14564 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & in i40e_check_fw_empr()
14571 * i40e_handle_resets - handle EMP resets and PF resets
14572 * @pf: board private structure
14574 * Handle both EMP resets and PF resets and conclude whether there are
14581 static i40e_status i40e_handle_resets(struct i40e_pf *pf) in i40e_handle_resets() argument
14583 const i40e_status pfr = i40e_pf_loop_reset(pf); in i40e_handle_resets()
14584 const bool is_empr = i40e_check_fw_empr(pf); in i40e_handle_resets()
14587 …dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several … in i40e_handle_resets()
14594 * @pf: board private structure
14602 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) in i40e_init_recovery_mode() argument
14608 pci_save_state(pf->pdev); in i40e_init_recovery_mode()
14611 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_init_recovery_mode()
14612 pf->service_timer_period = HZ; in i40e_init_recovery_mode()
14614 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_init_recovery_mode()
14615 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_init_recovery_mode()
14617 err = i40e_init_interrupt_scheme(pf); in i40e_init_recovery_mode()
14626 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_init_recovery_mode()
14627 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_init_recovery_mode()
14629 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_init_recovery_mode()
14631 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */ in i40e_init_recovery_mode()
14632 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
14634 if (!pf->vsi) { in i40e_init_recovery_mode()
14642 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); in i40e_init_recovery_mode()
14645 pf->lan_vsi = v_idx; in i40e_init_recovery_mode()
14646 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
14657 i40e_dbg_pf_init(pf); in i40e_init_recovery_mode()
14659 err = i40e_setup_misc_vector_for_recovery_mode(pf); in i40e_init_recovery_mode()
14664 i40e_send_version(pf); in i40e_init_recovery_mode()
14667 mod_timer(&pf->service_timer, in i40e_init_recovery_mode()
14668 round_jiffies(jiffies + pf->service_timer_period)); in i40e_init_recovery_mode()
14673 i40e_reset_interrupt_capability(pf); in i40e_init_recovery_mode()
14674 del_timer_sync(&pf->service_timer); in i40e_init_recovery_mode()
14677 pci_disable_pcie_error_reporting(pf->pdev); in i40e_init_recovery_mode()
14678 pci_release_mem_regions(pf->pdev); in i40e_init_recovery_mode()
14679 pci_disable_device(pf->pdev); in i40e_init_recovery_mode()
14680 kfree(pf); in i40e_init_recovery_mode()
14690 * i40e_probe initializes a PF identified by a pci_dev structure.
14691 * The OS initialization, configuring of the PF private structure,
14699 struct i40e_pf *pf; in i40e_probe() local
14740 pf = kzalloc(sizeof(*pf), GFP_KERNEL); in i40e_probe()
14741 if (!pf) { in i40e_probe()
14745 pf->next_vsi = 0; in i40e_probe()
14746 pf->pdev = pdev; in i40e_probe()
14747 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
14749 hw = &pf->hw; in i40e_probe()
14750 hw->back = pf; in i40e_probe()
14752 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), in i40e_probe()
14759 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { in i40e_probe()
14761 pf->ioremap_len); in i40e_probe()
14765 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); in i40e_probe()
14770 pf->ioremap_len, err); in i40e_probe()
14781 pf->instance = pfs_found; in i40e_probe()
14790 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_probe()
14791 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_probe()
14792 INIT_LIST_HEAD(&pf->ddp_old_prof); in i40e_probe()
14800 pf->msg_enable = netif_msg_init(debug, in i40e_probe()
14805 pf->hw.debug_mask = debug; in i40e_probe()
14813 pf->corer_count++; in i40e_probe()
14818 /* Reset here to make sure all is clean and to define PF 'n' */ in i40e_probe()
14828 err = i40e_handle_resets(pf); in i40e_probe()
14832 i40e_check_recovery_mode(pf); in i40e_probe()
14838 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; in i40e_probe()
14840 snprintf(pf->int_name, sizeof(pf->int_name) - 1, in i40e_probe()
14842 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); in i40e_probe()
14852 pf->hw.fc.requested_mode = I40E_FC_NONE; in i40e_probe()
14894 i40e_verify_eeprom(pf); in i40e_probe()
14902 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); in i40e_probe()
14906 err = i40e_sw_init(pf); in i40e_probe()
14912 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_probe()
14913 return i40e_init_recovery_mode(pf, hw); in i40e_probe()
14933 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { in i40e_probe()
14939 i40e_get_platform_mac_addr(pdev, pf); in i40e_probe()
14950 pf->hw_features |= I40E_HW_PORT_ID_VALID; in i40e_probe()
14952 pci_set_drvdata(pdev, pf); in i40e_probe()
14956 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? in i40e_probe()
14964 err = i40e_init_pf_dcb(pf); in i40e_probe()
14967 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); in i40e_probe()
14973 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_probe()
14974 pf->service_timer_period = HZ; in i40e_probe()
14976 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_probe()
14977 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_probe()
14982 pf->wol_en = false; in i40e_probe()
14984 pf->wol_en = true; in i40e_probe()
14985 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_probe()
14988 i40e_determine_queue_usage(pf); in i40e_probe()
14989 err = i40e_init_interrupt_scheme(pf); in i40e_probe()
14993 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; in i40e_probe()
14994 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; in i40e_probe()
14995 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in i40e_probe()
14996 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; in i40e_probe()
14997 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; in i40e_probe()
14998 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | in i40e_probe()
15006 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_probe()
15007 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_probe()
15009 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_probe()
15010 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in i40e_probe()
15011 dev_warn(&pf->pdev->dev, in i40e_probe()
15013 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in i40e_probe()
15014 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in i40e_probe()
15017 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ in i40e_probe()
15018 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
15020 if (!pf->vsi) { in i40e_probe()
15027 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
15028 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
15029 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
15031 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; in i40e_probe()
15034 err = i40e_setup_pf_switch(pf, false); in i40e_probe()
15039 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); in i40e_probe()
15044 dev_dbg(&pf->pdev->dev, in i40e_probe()
15049 dev_dbg(&pf->pdev->dev, in i40e_probe()
15054 dev_dbg(&pf->pdev->dev, in i40e_probe()
15060 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_probe()
15061 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { in i40e_probe()
15062 i40e_vsi_open(pf->vsi[i]); in i40e_probe()
15070 err = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_probe()
15075 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", in i40e_probe()
15076 i40e_stat_str(&pf->hw, err), in i40e_probe()
15077 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
15090 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { in i40e_probe()
15092 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_probe()
15094 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", in i40e_probe()
15095 i40e_stat_str(&pf->hw, err), in i40e_probe()
15096 i40e_aq_str(&pf->hw, in i40e_probe()
15097 pf->hw.aq.asq_last_status)); in i40e_probe()
15103 clear_bit(__I40E_DOWN, pf->state); in i40e_probe()
15110 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_probe()
15111 err = i40e_setup_misc_vector(pf); in i40e_probe()
15121 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && in i40e_probe()
15122 (pf->flags & I40E_FLAG_MSIX_ENABLED) && in i40e_probe()
15123 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
15133 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); in i40e_probe()
15142 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
15143 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_probe()
15144 pf->num_iwarp_msix, in i40e_probe()
15146 if (pf->iwarp_base_vector < 0) { in i40e_probe()
15149 pf->num_iwarp_msix, pf->iwarp_base_vector); in i40e_probe()
15150 pf->flags &= ~I40E_FLAG_IWARP_ENABLED; in i40e_probe()
15154 i40e_dbg_pf_init(pf); in i40e_probe()
15157 i40e_send_version(pf); in i40e_probe()
15160 mod_timer(&pf->service_timer, in i40e_probe()
15161 round_jiffies(jiffies + pf->service_timer_period)); in i40e_probe()
15163 /* add this PF to client device list and launch a client service task */ in i40e_probe()
15164 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_probe()
15165 err = i40e_lan_add_device(pf); in i40e_probe()
15167 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", in i40e_probe()
15177 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { in i40e_probe()
15184 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, in i40e_probe()
15225 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", in i40e_probe()
15226 i40e_stat_str(&pf->hw, err), in i40e_probe()
15227 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
15228 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; in i40e_probe()
15231 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); in i40e_probe()
15236 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", in i40e_probe()
15237 i40e_stat_str(&pf->hw, err), in i40e_probe()
15238 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
15242 val = (rd32(&pf->hw, I40E_PRTGL_SAH) & in i40e_probe()
15251 * PF/VF VSIs. in i40e_probe()
15254 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_probe()
15255 pf->main_vsi_seid); in i40e_probe()
15257 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || in i40e_probe()
15258 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) in i40e_probe()
15259 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; in i40e_probe()
15260 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) in i40e_probe()
15261 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; in i40e_probe()
15263 i40e_print_features(pf); in i40e_probe()
15269 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
15270 i40e_clear_interrupt_scheme(pf); in i40e_probe()
15271 kfree(pf->vsi); in i40e_probe()
15273 i40e_reset_interrupt_capability(pf); in i40e_probe()
15274 del_timer_sync(&pf->service_timer); in i40e_probe()
15279 kfree(pf->qp_pile); in i40e_probe()
15285 kfree(pf); in i40e_probe()
15306 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_remove() local
15307 struct i40e_hw *hw = &pf->hw; in i40e_remove()
15311 i40e_dbg_pf_exit(pf); in i40e_remove()
15313 i40e_ptp_stop(pf); in i40e_remove()
15319 while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_remove()
15322 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { in i40e_remove()
15323 set_bit(__I40E_VF_RESETS_DISABLED, pf->state); in i40e_remove()
15324 i40e_free_vfs(pf); in i40e_remove()
15325 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; in i40e_remove()
15328 set_bit(__I40E_SUSPENDED, pf->state); in i40e_remove()
15329 set_bit(__I40E_DOWN, pf->state); in i40e_remove()
15330 if (pf->service_timer.function) in i40e_remove()
15331 del_timer_sync(&pf->service_timer); in i40e_remove()
15332 if (pf->service_task.func) in i40e_remove()
15333 cancel_work_sync(&pf->service_task); in i40e_remove()
15335 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_remove()
15336 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove()
15338 /* We know that we have allocated only one vsi for this PF, in i40e_remove()
15351 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_remove()
15353 i40e_fdir_teardown(pf); in i40e_remove()
15356 * This will leave only the PF's VSI remaining. in i40e_remove()
15359 if (!pf->veb[i]) in i40e_remove()
15362 if (pf->veb[i]->uplink_seid == pf->mac_seid || in i40e_remove()
15363 pf->veb[i]->uplink_seid == 0) in i40e_remove()
15364 i40e_switch_branch_release(pf->veb[i]); in i40e_remove()
15367 /* Now we can shutdown the PF's VSI, just before we kill in i40e_remove()
15370 if (pf->vsi[pf->lan_vsi]) in i40e_remove()
15371 i40e_vsi_release(pf->vsi[pf->lan_vsi]); in i40e_remove()
15373 i40e_cloud_filter_exit(pf); in i40e_remove()
15376 if (pf->flags & I40E_FLAG_IWARP_ENABLED) { in i40e_remove()
15377 ret_code = i40e_lan_del_device(pf); in i40e_remove()
15394 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_remove()
15395 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_remove()
15396 free_irq(pf->pdev->irq, pf); in i40e_remove()
15407 i40e_clear_interrupt_scheme(pf); in i40e_remove()
15408 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_remove()
15409 if (pf->vsi[i]) { in i40e_remove()
15410 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_remove()
15411 i40e_vsi_clear_rings(pf->vsi[i]); in i40e_remove()
15412 i40e_vsi_clear(pf->vsi[i]); in i40e_remove()
15413 pf->vsi[i] = NULL; in i40e_remove()
15419 kfree(pf->veb[i]); in i40e_remove()
15420 pf->veb[i] = NULL; in i40e_remove()
15423 kfree(pf->qp_pile); in i40e_remove()
15424 kfree(pf->vsi); in i40e_remove()
15427 kfree(pf); in i40e_remove()
15446 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_detected() local
15450 if (!pf) { in i40e_pci_error_detected()
15457 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_detected()
15458 i40e_prep_for_reset(pf, false); in i40e_pci_error_detected()
15475 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_slot_reset() local
15490 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_pci_error_slot_reset()
15506 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_reset_prepare() local
15508 i40e_prep_for_reset(pf, false); in i40e_pci_error_reset_prepare()
15517 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_reset_done() local
15519 i40e_reset_and_rebuild(pf, false, false); in i40e_pci_error_reset_done()
15531 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_pci_error_resume() local
15534 if (test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_resume()
15537 i40e_handle_reset_warning(pf, false); in i40e_pci_error_resume()
15543 * @pf: pointer to i40e_pf struct
15545 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) in i40e_enable_mc_magic_wake() argument
15547 struct i40e_hw *hw = &pf->hw; in i40e_enable_mc_magic_wake()
15553 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { in i40e_enable_mc_magic_wake()
15555 pf->vsi[pf->lan_vsi]->netdev->dev_addr); in i40e_enable_mc_magic_wake()
15557 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
15573 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
15583 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
15593 struct i40e_pf *pf = pci_get_drvdata(pdev); in i40e_shutdown() local
15594 struct i40e_hw *hw = &pf->hw; in i40e_shutdown()
15596 set_bit(__I40E_SUSPENDED, pf->state); in i40e_shutdown()
15597 set_bit(__I40E_DOWN, pf->state); in i40e_shutdown()
15599 del_timer_sync(&pf->service_timer); in i40e_shutdown()
15600 cancel_work_sync(&pf->service_task); in i40e_shutdown()
15601 i40e_cloud_filter_exit(pf); in i40e_shutdown()
15602 i40e_fdir_teardown(pf); in i40e_shutdown()
15607 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_shutdown()
15609 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_shutdown()
15610 i40e_enable_mc_magic_wake(pf); in i40e_shutdown()
15612 i40e_prep_for_reset(pf, false); in i40e_shutdown()
15615 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_shutdown()
15617 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_shutdown()
15620 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_shutdown()
15621 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) in i40e_shutdown()
15622 free_irq(pf->pdev->irq, pf); in i40e_shutdown()
15629 i40e_clear_interrupt_scheme(pf); in i40e_shutdown()
15633 pci_wake_from_d3(pdev, pf->wol_en); in i40e_shutdown()
15644 struct i40e_pf *pf = dev_get_drvdata(dev); in i40e_suspend() local
15645 struct i40e_hw *hw = &pf->hw; in i40e_suspend()
15648 if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) in i40e_suspend()
15651 set_bit(__I40E_DOWN, pf->state); in i40e_suspend()
15654 del_timer_sync(&pf->service_timer); in i40e_suspend()
15655 cancel_work_sync(&pf->service_task); in i40e_suspend()
15660 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_suspend()
15662 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) in i40e_suspend()
15663 i40e_enable_mc_magic_wake(pf); in i40e_suspend()
15671 i40e_prep_for_reset(pf, true); in i40e_suspend()
15673 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_suspend()
15674 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_suspend()
15681 i40e_clear_interrupt_scheme(pf); in i40e_suspend()
15694 struct i40e_pf *pf = dev_get_drvdata(dev); in i40e_resume() local
15698 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_resume()
15709 err = i40e_restore_interrupt_scheme(pf); in i40e_resume()
15715 clear_bit(__I40E_DOWN, pf->state); in i40e_resume()
15716 i40e_reset_and_rebuild(pf, false, true); in i40e_resume()
15721 clear_bit(__I40E_SUSPENDED, pf->state); in i40e_resume()
15724 mod_timer(&pf->service_timer, in i40e_resume()
15725 round_jiffies(jiffies + pf->service_timer_period)); in i40e_resume()