Lines Matching +full:precondition +full:- +full:timeout
1 // SPDX-License-Identifier: GPL-2.0
39 static int debug = -1;
52 * ice_hw_to_dev - Get device pointer from the hardware structure
63 return &pf->pdev->dev; in ice_hw_to_dev()
85 return dev && (dev->netdev_ops == &ice_netdev_ops); in netif_is_ice()
89 * ice_get_tx_pending - returns number of Tx descriptors not processed
96 head = ring->next_to_clean; in ice_get_tx_pending()
97 tail = ring->next_to_use; in ice_get_tx_pending()
101 tail - head : (tail + ring->count - head); in ice_get_tx_pending()
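The arithmetic on the line above is the standard wrap-around count of outstanding descriptors in a ring. A standalone restatement, with the function name assumed for illustration:

static u16 ring_pending(u16 head, u16 tail, u16 count)
{
	/* tail behind head means the producer has wrapped past the end */
	return (tail >= head) ? tail - head : tail + count - head;
}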
106 * ice_check_for_hang_subtask - check for and recover hung queues
118 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { in ice_check_for_hang_subtask()
119 vsi = pf->vsi[v]; in ice_check_for_hang_subtask()
123 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) in ice_check_for_hang_subtask()
126 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) in ice_check_for_hang_subtask()
129 hw = &vsi->back->hw; in ice_check_for_hang_subtask()
132 struct ice_tx_ring *tx_ring = vsi->tx_rings[i]; in ice_check_for_hang_subtask()
139 if (tx_ring->desc) { in ice_check_for_hang_subtask()
147 packets = tx_ring->stats.pkts & INT_MAX; in ice_check_for_hang_subtask()
148 if (tx_ring->tx_stats.prev_pkt == packets) { in ice_check_for_hang_subtask()
150 ice_trigger_sw_intr(hw, tx_ring->q_vector); in ice_check_for_hang_subtask()
158 tx_ring->tx_stats.prev_pkt = in ice_check_for_hang_subtask()
159 ice_get_tx_pending(tx_ring) ? packets : -1; in ice_check_for_hang_subtask()
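A condensed sketch of the no-progress heuristic above, under an assumed txq_watch wrapper: a queue is flagged only when its completed-packet counter matches the value recorded on the previous pass, and the recorded value is reset to -1 whenever the ring drains, so a single idle interval can never look like a hang (packet counters are masked to INT_MAX and thus never -1).

struct txq_watch {
	long prev_pkt;	/* last counter seen while work was pending, else -1 */
};

static bool txq_no_progress(struct txq_watch *w, long packets, u16 pending)
{
	bool hung = (w->prev_pkt == packets);

	w->prev_pkt = pending ? packets : -1;
	return hung;
}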
165 * ice_init_mac_fltr - Set initial MAC filters
179 return -EINVAL; in ice_init_mac_fltr()
181 perm_addr = vsi->port_info->mac.perm_addr; in ice_init_mac_fltr()
186 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
198 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_sync_list()
200 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, in ice_add_mac_to_sync_list()
202 return -EINVAL; in ice_add_mac_to_sync_list()
208 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
220 struct ice_vsi *vsi = np->vsi; in ice_add_mac_to_unsync_list()
227 if (ether_addr_equal(addr, netdev->dev_addr)) in ice_add_mac_to_unsync_list()
230 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, in ice_add_mac_to_unsync_list()
232 return -EINVAL; in ice_add_mac_to_unsync_list()
238 * ice_vsi_fltr_changed - check if filter state changed
245 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || in ice_vsi_fltr_changed()
246 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_fltr_changed()
250 * ice_set_promisc - Enable promiscuous mode for a given PF
259 if (vsi->type != ICE_VSI_PF) in ice_set_promisc()
264 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_set_promisc()
267 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_set_promisc()
270 if (status && status != -EEXIST) in ice_set_promisc()
277 * ice_clear_promisc - Disable promiscuous mode for a given PF
286 if (vsi->type != ICE_VSI_PF) in ice_clear_promisc()
291 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, in ice_clear_promisc()
294 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_clear_promisc()
302 * ice_get_devlink_port - Get devlink port from netdev
312 return &pf->devlink_port; in ice_get_devlink_port()
316 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
324 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_sync_fltr()
325 struct net_device *netdev = vsi->netdev; in ice_vsi_sync_fltr()
327 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr()
328 struct ice_hw *hw = &pf->hw; in ice_vsi_sync_fltr()
332 if (!vsi->netdev) in ice_vsi_sync_fltr()
333 return -EINVAL; in ice_vsi_sync_fltr()
335 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vsi_sync_fltr()
338 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; in ice_vsi_sync_fltr()
339 vsi->current_netdev_flags = vsi->netdev->flags; in ice_vsi_sync_fltr()
341 INIT_LIST_HEAD(&vsi->tmp_sync_list); in ice_vsi_sync_fltr()
342 INIT_LIST_HEAD(&vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
345 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
346 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
359 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
360 ice_fltr_free_list(dev, &vsi->tmp_unsync_list); in ice_vsi_sync_fltr()
364 if (err == -ENOMEM) in ice_vsi_sync_fltr()
369 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
370 ice_fltr_free_list(dev, &vsi->tmp_sync_list); in ice_vsi_sync_fltr()
375 if (err && err != -EEXIST) { in ice_vsi_sync_fltr()
381 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && in ice_vsi_sync_fltr()
383 vsi->state)) { in ice_vsi_sync_fltr()
386 vsi->vsi_num); in ice_vsi_sync_fltr()
394 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vsi_sync_fltr()
397 vsi->current_netdev_flags &= ~IFF_ALLMULTI; in ice_vsi_sync_fltr()
401 /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ in ice_vsi_sync_fltr()
404 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vsi_sync_fltr()
411 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { in ice_vsi_sync_fltr()
412 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
413 if (vsi->current_netdev_flags & IFF_PROMISC) { in ice_vsi_sync_fltr()
415 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) { in ice_vsi_sync_fltr()
417 if (err && err != -EEXIST) { in ice_vsi_sync_fltr()
419 err, vsi->vsi_num); in ice_vsi_sync_fltr()
420 vsi->current_netdev_flags &= in ice_vsi_sync_fltr()
425 vlan_ops->dis_rx_filtering(vsi); in ice_vsi_sync_fltr()
433 err, vsi->vsi_num); in ice_vsi_sync_fltr()
434 vsi->current_netdev_flags |= in ice_vsi_sync_fltr()
438 if (vsi->netdev->features & in ice_vsi_sync_fltr()
440 vlan_ops->ena_rx_filtering(vsi); in ice_vsi_sync_fltr()
447 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); in ice_vsi_sync_fltr()
451 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
452 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_vsi_sync_fltr()
454 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vsi_sync_fltr()
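The sync routine above detects toggled flags with an XOR of the cached and current netdev flags (line 338); a set bit in the result means that flag changed since the last sync. A hypothetical two-flag illustration, with apply_promisc()/apply_allmulti() as assumed helpers:

unsigned int changed = old_flags ^ new_flags;	/* set bits = toggled */

if (changed & IFF_PROMISC)
	apply_promisc(new_flags & IFF_PROMISC);		/* assumed helper */
if (changed & IFF_ALLMULTI)
	apply_allmulti(new_flags & IFF_ALLMULTI);	/* assumed helper */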
459 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
466 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) in ice_sync_fltr_subtask()
469 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
472 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && in ice_sync_fltr_subtask()
473 ice_vsi_sync_fltr(pf->vsi[v])) { in ice_sync_fltr_subtask()
475 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); in ice_sync_fltr_subtask()
481 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
491 if (pf->vsi[v]) in ice_pf_dis_all_vsi()
492 ice_dis_vsi(pf->vsi[v], locked); in ice_pf_dis_all_vsi()
495 pf->pf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
498 pf->vf_agg_node[node].num_vsis = 0; in ice_pf_dis_all_vsi()
502 * ice_clear_sw_switch_recipes - clear switch recipes
506 * rules (especially advanced rules) need to be restored, either re-read from
515 recp = pf->hw.switch_info->recp_list; in ice_clear_sw_switch_recipes()
521 * ice_prepare_for_reset - prep for reset
530 struct ice_hw *hw = &pf->hw; in ice_prepare_for_reset()
538 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) in ice_prepare_for_reset()
544 if (ice_check_sq_alive(hw, &hw->mailboxq)) in ice_prepare_for_reset()
548 mutex_lock(&pf->vfs.table_lock); in ice_prepare_for_reset()
551 mutex_unlock(&pf->vfs.table_lock); in ice_prepare_for_reset()
566 vsi->orig_rss_size = 0; in ice_prepare_for_reset()
568 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_prepare_for_reset()
570 vsi->old_ena_tc = vsi->all_enatc; in ice_prepare_for_reset()
571 vsi->old_numtc = vsi->all_numtc; in ice_prepare_for_reset()
578 vsi->old_ena_tc = 0; in ice_prepare_for_reset()
579 vsi->all_enatc = 0; in ice_prepare_for_reset()
580 vsi->old_numtc = 0; in ice_prepare_for_reset()
581 vsi->all_numtc = 0; in ice_prepare_for_reset()
582 vsi->req_txq = 0; in ice_prepare_for_reset()
583 vsi->req_rxq = 0; in ice_prepare_for_reset()
584 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_prepare_for_reset()
585 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); in ice_prepare_for_reset()
595 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_prepare_for_reset()
601 if (hw->port_info) in ice_prepare_for_reset()
602 ice_sched_clear_port(hw->port_info); in ice_prepare_for_reset()
606 set_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_prepare_for_reset()
610 * ice_do_reset - Initiate one of many types of resets
617 struct ice_hw *hw = &pf->hw; in ice_do_reset()
626 set_bit(ICE_RESET_FAILED, pf->state); in ice_do_reset()
627 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_do_reset()
628 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
629 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
630 clear_bit(ICE_CORER_REQ, pf->state); in ice_do_reset()
631 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_do_reset()
632 wake_up(&pf->reset_wait_queue); in ice_do_reset()
637 * interrupt. So for PFR, rebuild after the reset and clear the reset- in ice_do_reset()
641 pf->pfr_count++; in ice_do_reset()
643 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_do_reset()
644 clear_bit(ICE_PFR_REQ, pf->state); in ice_do_reset()
645 wake_up(&pf->reset_wait_queue); in ice_do_reset()
651 * ice_reset_subtask - Set up for resetting the device and driver
660 * of reset is pending and sets bits in pf->state indicating the reset in ice_reset_subtask()
662 * prepare for pending reset if not already (for PF software-initiated in ice_reset_subtask()
668 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_reset_subtask()
670 if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) in ice_reset_subtask()
672 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) in ice_reset_subtask()
674 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) in ice_reset_subtask()
682 if (ice_check_reset(&pf->hw)) { in ice_reset_subtask()
683 set_bit(ICE_RESET_FAILED, pf->state); in ice_reset_subtask()
686 pf->hw.reset_ongoing = false; in ice_reset_subtask()
691 clear_bit(ICE_RESET_OICR_RECV, pf->state); in ice_reset_subtask()
692 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); in ice_reset_subtask()
693 clear_bit(ICE_PFR_REQ, pf->state); in ice_reset_subtask()
694 clear_bit(ICE_CORER_REQ, pf->state); in ice_reset_subtask()
695 clear_bit(ICE_GLOBR_REQ, pf->state); in ice_reset_subtask()
696 wake_up(&pf->reset_wait_queue); in ice_reset_subtask()
704 if (test_bit(ICE_PFR_REQ, pf->state)) in ice_reset_subtask()
706 if (test_bit(ICE_CORER_REQ, pf->state)) in ice_reset_subtask()
708 if (test_bit(ICE_GLOBR_REQ, pf->state)) in ice_reset_subtask()
715 if (!test_bit(ICE_DOWN, pf->state) && in ice_reset_subtask()
716 !test_bit(ICE_CFG_BUSY, pf->state)) { in ice_reset_subtask()
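A condensation of the request handling above: the three request bits are checked in increasing order of impact and each check overwrites the chosen type, so the widest reset requested wins. Names follow the driver's ice_reset_req enum; treat this as a sketch, not the verbatim function body.

enum ice_reset_req reset_type = ICE_RESET_INVAL;

if (test_bit(ICE_PFR_REQ, pf->state))
	reset_type = ICE_RESET_PFR;
if (test_bit(ICE_CORER_REQ, pf->state))
	reset_type = ICE_RESET_CORER;
if (test_bit(ICE_GLOBR_REQ, pf->state))
	reset_type = ICE_RESET_GLOBR;	/* global reset takes precedence */

if (reset_type != ICE_RESET_INVAL &&
    !test_bit(ICE_DOWN, pf->state) &&
    !test_bit(ICE_CFG_BUSY, pf->state))
	ice_do_reset(pf, reset_type);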
722 * ice_print_topo_conflict - print topology conflict message
727 switch (vsi->port_info->phy.link_info.topo_media_conflict) { in ice_print_topo_conflict()
733 …netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not … in ice_print_topo_conflict()
736 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) in ice_print_topo_conflict()
737 …netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet … in ice_print_topo_conflict()
739 …netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was d… in ice_print_topo_conflict()
747 * ice_print_link_msg - print link up or down message
765 if (vsi->current_isup == isup) in ice_print_link_msg()
768 vsi->current_isup = isup; in ice_print_link_msg()
771 netdev_info(vsi->netdev, "NIC Link is Down\n"); in ice_print_link_msg()
775 switch (vsi->port_info->phy.link_info.link_speed) { in ice_print_link_msg()
811 switch (vsi->port_info->fc.current_mode) { in ice_print_link_msg()
830 switch (vsi->port_info->phy.link_info.fec_info) { in ice_print_link_msg()
833 fec = "RS-FEC"; in ice_print_link_msg()
836 fec = "FC-FEC/BASE-R"; in ice_print_link_msg()
844 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) in ice_print_link_msg()
857 status = ice_aq_get_phy_caps(vsi->port_info, false, in ice_print_link_msg()
860 netdev_info(vsi->netdev, "Get phy capability failed.\n"); in ice_print_link_msg()
864 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || in ice_print_link_msg()
865 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) in ice_print_link_msg()
866 fec_req = "RS-FEC"; in ice_print_link_msg()
867 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || in ice_print_link_msg()
868 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) in ice_print_link_msg()
869 fec_req = "FC-FEC/BASE-R"; in ice_print_link_msg()
876 …netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s,… in ice_print_link_msg()
882 * ice_vsi_link_event - update the VSI's netdev
891 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) in ice_vsi_link_event()
894 if (vsi->type == ICE_VSI_PF) { in ice_vsi_link_event()
895 if (link_up == netif_carrier_ok(vsi->netdev)) in ice_vsi_link_event()
899 netif_carrier_on(vsi->netdev); in ice_vsi_link_event()
900 netif_tx_wake_all_queues(vsi->netdev); in ice_vsi_link_event()
902 netif_carrier_off(vsi->netdev); in ice_vsi_link_event()
903 netif_tx_stop_all_queues(vsi->netdev); in ice_vsi_link_event()
909 * ice_set_dflt_mib - send a default config MIB to the FW
925 struct ice_hw *hw = &pf->hw; in ice_set_dflt_mib()
940 tlv->typelen = htons(typelen); in ice_set_dflt_mib()
943 tlv->ouisubtype = htonl(ouisubtype); in ice_set_dflt_mib()
945 buf = tlv->tlvinfo; in ice_set_dflt_mib()
948 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. in ice_set_dflt_mib()
949 * Octets 5 - 12 are BW values, set octet 5 to 100% BW. in ice_set_dflt_mib()
950 * Octets 13 - 20 are TSA values - leave as zeros in ice_set_dflt_mib()
956 ((char *)tlv + sizeof(tlv->typelen) + len); in ice_set_dflt_mib()
959 buf = tlv->tlvinfo; in ice_set_dflt_mib()
960 tlv->typelen = htons(typelen); in ice_set_dflt_mib()
964 tlv->ouisubtype = htonl(ouisubtype); in ice_set_dflt_mib()
967 * Octets 1 - 4 map UP to TC - all UPs map to zero in ice_set_dflt_mib()
968 * Octets 5 - 12 are BW values - set TC 0 to 100%. in ice_set_dflt_mib()
969 * Octets 13 - 20 are TSA value - leave as zeros in ice_set_dflt_mib()
974 ((char *)tlv + sizeof(tlv->typelen) + len); in ice_set_dflt_mib()
979 tlv->typelen = htons(typelen); in ice_set_dflt_mib()
983 tlv->ouisubtype = htonl(ouisubtype); in ice_set_dflt_mib()
985 /* Octet 1 left as all zeros - PFC disabled */ in ice_set_dflt_mib()
997 * ice_check_phy_fw_load - check if PHY FW load failed
1006 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1010 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags)) in ice_check_phy_fw_load()
1015 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags); in ice_check_phy_fw_load()
1032 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1039 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) in ice_check_module_power()
1044 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1047 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); in ice_check_module_power()
1052 * ice_check_link_cfg_err - check if link configuration failed
1066 * ice_link_event - process the link event
1085 phy_info = &pi->phy; in ice_link_event()
1086 phy_info->link_info_old = phy_info->link_info; in ice_link_event()
1088 old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); in ice_link_event()
1089 old_link_speed = phy_info->link_info_old.link_speed; in ice_link_event()
1091 /* update the link info structures and re-enable link events, in ice_link_event()
1097 pi->lport, status, in ice_link_event()
1098 ice_aq_str(pi->hw->adminq.sq_last_status)); in ice_link_event()
1100 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_link_event()
1105 if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) in ice_link_event()
1109 if (!vsi || !vsi->port_info) in ice_link_event()
1110 return -EINVAL; in ice_link_event()
1113 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && in ice_link_event()
1114 !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { in ice_link_event()
1115 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_link_event()
1123 if (!ice_is_e810(&pf->hw)) in ice_link_event()
1124 ice_ptp_link_change(pf, pf->hw.pf_id, link_up); in ice_link_event()
1127 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_link_event()
1142 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1150 if (test_bit(ICE_DOWN, pf->state) || in ice_watchdog_subtask()
1151 test_bit(ICE_CFG_BUSY, pf->state)) in ice_watchdog_subtask()
1156 pf->serv_tmr_prev + pf->serv_tmr_period)) in ice_watchdog_subtask()
1159 pf->serv_tmr_prev = jiffies; in ice_watchdog_subtask()
1166 if (pf->vsi[i] && pf->vsi[i]->netdev) in ice_watchdog_subtask()
1167 ice_update_vsi_stats(pf->vsi[i]); in ice_watchdog_subtask()
1171 * ice_init_link_events - enable/initialize link events
1174 * Returns -EIO on failure, 0 on success
1184 if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { in ice_init_link_events()
1185 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", in ice_init_link_events()
1186 pi->lport); in ice_init_link_events()
1187 return -EIO; in ice_init_link_events()
1191 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", in ice_init_link_events()
1192 pi->lport); in ice_init_link_events()
1193 return -EIO; in ice_init_link_events()
1200 * ice_handle_link_event - handle link event via ARQ
1211 link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; in ice_handle_link_event()
1212 port_info = pf->hw.port_info; in ice_handle_link_event()
1214 return -EINVAL; in ice_handle_link_event()
1217 !!(link_data->link_info & ICE_AQ_LINK_UP), in ice_handle_link_event()
1218 le16_to_cpu(link_data->link_speed)); in ice_handle_link_event()
1241 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1244 * @timeout: how long to wait, in jiffies
1249 * until the given timeout is reached.
1253 * event->msg_buf with enough space ahead of time.
1257 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, in ice_aq_wait_for_event() argument
1268 return -ENOMEM; in ice_aq_wait_for_event()
1270 INIT_HLIST_NODE(&task->entry); in ice_aq_wait_for_event()
1271 task->opcode = opcode; in ice_aq_wait_for_event()
1272 task->event = event; in ice_aq_wait_for_event()
1273 task->state = ICE_AQ_TASK_WAITING; in ice_aq_wait_for_event()
1275 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1276 hlist_add_head(&task->entry, &pf->aq_wait_list); in ice_aq_wait_for_event()
1277 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1281 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, in ice_aq_wait_for_event()
1282 timeout); in ice_aq_wait_for_event()
1283 switch (task->state) { in ice_aq_wait_for_event()
1285 err = ret < 0 ? ret : -ETIMEDOUT; in ice_aq_wait_for_event()
1288 err = ret < 0 ? ret : -ECANCELED; in ice_aq_wait_for_event()
1294 WARN(1, "Unexpected AdminQ wait task state %u", task->state); in ice_aq_wait_for_event()
1295 err = -EINVAL; in ice_aq_wait_for_event()
1300 jiffies_to_msecs(jiffies - start), in ice_aq_wait_for_event()
1301 jiffies_to_msecs(timeout), in ice_aq_wait_for_event()
1304 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
1305 hlist_del(&task->entry); in ice_aq_wait_for_event()
1306 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_wait_for_event()
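A minimal caller sketch for ice_aq_wait_for_event(), using the NVM write opcode purely as an example; per the comment above, msg_buf must be sized and supplied by the caller before waiting:

struct ice_rq_event_info event = {};
u8 buf[1024];	/* sizing is the caller's choice */
int err;

event.buf_len = sizeof(buf);
event.msg_buf = buf;

/* after firing the corresponding AdminQ command: */
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 3 * HZ, &event);
if (err == -ETIMEDOUT)
	dev_warn(ice_pf_to_dev(pf), "firmware completion timed out\n");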
1313 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1324 * Note that event->msg_buf will only be duplicated if the event has a buffer
1336 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1337 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { in ice_aq_check_events()
1338 if (task->state || task->opcode != opcode) in ice_aq_check_events()
1341 memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); in ice_aq_check_events()
1342 task->event->msg_len = event->msg_len; in ice_aq_check_events()
1345 if (task->event->msg_buf && in ice_aq_check_events()
1346 task->event->buf_len > event->buf_len) { in ice_aq_check_events()
1347 memcpy(task->event->msg_buf, event->msg_buf, in ice_aq_check_events()
1348 event->buf_len); in ice_aq_check_events()
1349 task->event->buf_len = event->buf_len; in ice_aq_check_events()
1352 task->state = ICE_AQ_TASK_COMPLETE; in ice_aq_check_events()
1355 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_check_events()
1358 wake_up(&pf->aq_wait_queue); in ice_aq_check_events()
1362 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1366 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1372 spin_lock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1373 hlist_for_each_entry(task, &pf->aq_wait_list, entry) in ice_aq_cancel_waiting_tasks()
1374 task->state = ICE_AQ_TASK_CANCELED; in ice_aq_cancel_waiting_tasks()
1375 spin_unlock_bh(&pf->aq_wait_lock); in ice_aq_cancel_waiting_tasks()
1377 wake_up(&pf->aq_wait_queue); in ice_aq_cancel_waiting_tasks()
1381 * __ice_clean_ctrlq - helper function to clean controlq rings
1389 struct ice_hw *hw = &pf->hw; in __ice_clean_ctrlq()
1396 if (test_bit(ICE_RESET_FAILED, pf->state)) in __ice_clean_ctrlq()
1401 cq = &hw->adminq; in __ice_clean_ctrlq()
1405 cq = &hw->sbq; in __ice_clean_ctrlq()
1409 cq = &hw->mailboxq; in __ice_clean_ctrlq()
1414 hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; in __ice_clean_ctrlq()
1421 /* check for error indications - PF_xx_AxQLEN register layout for in __ice_clean_ctrlq()
1424 val = rd32(hw, cq->rq.len); in __ice_clean_ctrlq()
1441 wr32(hw, cq->rq.len, val); in __ice_clean_ctrlq()
1444 val = rd32(hw, cq->sq.len); in __ice_clean_ctrlq()
1461 wr32(hw, cq->sq.len, val); in __ice_clean_ctrlq()
1464 event.buf_len = cq->rq_buf_size; in __ice_clean_ctrlq()
1474 if (ret == -EALREADY) in __ice_clean_ctrlq()
1518 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1528 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); in ice_ctrlq_pending()
1529 return cq->rq.next_to_clean != ntu; in ice_ctrlq_pending()
1533 * ice_clean_adminq_subtask - clean the AdminQ rings
1538 struct ice_hw *hw = &pf->hw; in ice_clean_adminq_subtask()
1540 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_clean_adminq_subtask()
1546 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_clean_adminq_subtask()
1553 if (ice_ctrlq_pending(hw, &hw->adminq)) in ice_clean_adminq_subtask()
1560 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1565 struct ice_hw *hw = &pf->hw; in ice_clean_mailboxq_subtask()
1567 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) in ice_clean_mailboxq_subtask()
1573 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_clean_mailboxq_subtask()
1575 if (ice_ctrlq_pending(hw, &hw->mailboxq)) in ice_clean_mailboxq_subtask()
1582 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1587 struct ice_hw *hw = &pf->hw; in ice_clean_sbq_subtask()
1591 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1595 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) in ice_clean_sbq_subtask()
1601 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_clean_sbq_subtask()
1603 if (ice_ctrlq_pending(hw, &hw->sbq)) in ice_clean_sbq_subtask()
1610 * ice_service_task_schedule - schedule the service task to wake up
1617 if (!test_bit(ICE_SERVICE_DIS, pf->state) && in ice_service_task_schedule()
1618 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && in ice_service_task_schedule()
1619 !test_bit(ICE_NEEDS_RESTART, pf->state)) in ice_service_task_schedule()
1620 queue_work(ice_wq, &pf->serv_task); in ice_service_task_schedule()
1624 * ice_service_task_complete - finish up the service task
1629 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); in ice_service_task_complete()
1631 /* force memory (pf->state) to sync before next service task */ in ice_service_task_complete()
1633 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_complete()
1637 * ice_service_task_stop - stop service task and cancel works
1647 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_stop()
1649 if (pf->serv_tmr.function) in ice_service_task_stop()
1650 del_timer_sync(&pf->serv_tmr); in ice_service_task_stop()
1651 if (pf->serv_task.func) in ice_service_task_stop()
1652 cancel_work_sync(&pf->serv_task); in ice_service_task_stop()
1654 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_service_task_stop()
1659 * ice_service_task_restart - restart service task and schedule works
1666 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_service_task_restart()
1671 * ice_service_timer - timer callback to schedule service task
1678 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); in ice_service_timer()
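The timer callback above re-arms itself every serv_tmr_period and then kicks the service task; round_jiffies() aligns expiry to a whole-second boundary so periodic wakeups across the system can batch. A sketch with assumed names:

static void my_service_timer(struct timer_list *t)
{
	struct my_pf *pf = from_timer(pf, t, serv_tmr);

	/* re-arm first so a slow service task cannot stall the period */
	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	my_service_task_schedule(pf);	/* assumed: queues the work item */
}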
1683 * ice_handle_mdd_event - handle malicious driver detect event
1690 * private flag mdd-auto-reset-vf.
1695 struct ice_hw *hw = &pf->hw; in ice_handle_mdd_event()
1700 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { in ice_handle_mdd_event()
1785 mutex_lock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1787 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); in ice_handle_mdd_event()
1789 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1790 vf->mdd_tx_events.count++; in ice_handle_mdd_event()
1791 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1794 vf->vf_id); in ice_handle_mdd_event()
1797 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); in ice_handle_mdd_event()
1799 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1800 vf->mdd_tx_events.count++; in ice_handle_mdd_event()
1801 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1804 vf->vf_id); in ice_handle_mdd_event()
1807 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); in ice_handle_mdd_event()
1809 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1810 vf->mdd_tx_events.count++; in ice_handle_mdd_event()
1811 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1814 vf->vf_id); in ice_handle_mdd_event()
1817 reg = rd32(hw, VP_MDET_RX(vf->vf_id)); in ice_handle_mdd_event()
1819 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); in ice_handle_mdd_event()
1820 vf->mdd_rx_events.count++; in ice_handle_mdd_event()
1821 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); in ice_handle_mdd_event()
1824 vf->vf_id); in ice_handle_mdd_event()
1828 * private flag mdd-auto-reset-vf. in ice_handle_mdd_event()
1830 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { in ice_handle_mdd_event()
1839 mutex_unlock(&pf->vfs.table_lock); in ice_handle_mdd_event()
1845 * ice_force_phys_link_state - Force the physical link state
1864 if (!vsi || !vsi->port_info || !vsi->back) in ice_force_phys_link_state()
1865 return -EINVAL; in ice_force_phys_link_state()
1866 if (vsi->type != ICE_VSI_PF) in ice_force_phys_link_state()
1869 dev = ice_pf_to_dev(vsi->back); in ice_force_phys_link_state()
1871 pi = vsi->port_info; in ice_force_phys_link_state()
1875 return -ENOMEM; in ice_force_phys_link_state()
1881 vsi->vsi_num, retcode); in ice_force_phys_link_state()
1882 retcode = -EIO; in ice_force_phys_link_state()
1887 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && in ice_force_phys_link_state()
1888 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) in ice_force_phys_link_state()
1895 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); in ice_force_phys_link_state()
1897 retcode = -ENOMEM; in ice_force_phys_link_state()
1901 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; in ice_force_phys_link_state()
1903 cfg->caps |= ICE_AQ_PHY_ENA_LINK; in ice_force_phys_link_state()
1905 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; in ice_force_phys_link_state()
1907 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); in ice_force_phys_link_state()
1910 vsi->vsi_num, retcode); in ice_force_phys_link_state()
1911 retcode = -EIO; in ice_force_phys_link_state()
1921 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1929 struct ice_pf *pf = pi->hw->back; in ice_init_nvm_phy_type()
1934 return -ENOMEM; in ice_init_nvm_phy_type()
1944 pf->nvm_phy_type_hi = pcaps->phy_type_high; in ice_init_nvm_phy_type()
1945 pf->nvm_phy_type_lo = pcaps->phy_type_low; in ice_init_nvm_phy_type()
1953 * ice_init_link_dflt_override - Initialize link default override
1961 struct ice_pf *pf = pi->hw->back; in ice_init_link_dflt_override()
1963 ldo = &pf->link_dflt_override; in ice_init_link_dflt_override()
1967 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) in ice_init_link_dflt_override()
1970 /* Enable Total Port Shutdown (override/replace link-down-on-close in ice_init_link_dflt_override()
1973 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in ice_init_link_dflt_override()
1974 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in ice_init_link_dflt_override()
1978 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1998 struct ice_phy_info *phy = &pi->phy; in ice_init_phy_cfg_dflt_override()
1999 struct ice_pf *pf = pi->hw->back; in ice_init_phy_cfg_dflt_override()
2001 ldo = &pf->link_dflt_override; in ice_init_phy_cfg_dflt_override()
2006 cfg = &phy->curr_user_phy_cfg; in ice_init_phy_cfg_dflt_override()
2008 if (ldo->phy_type_low || ldo->phy_type_high) { in ice_init_phy_cfg_dflt_override()
2009 cfg->phy_type_low = pf->nvm_phy_type_lo & in ice_init_phy_cfg_dflt_override()
2010 cpu_to_le64(ldo->phy_type_low); in ice_init_phy_cfg_dflt_override()
2011 cfg->phy_type_high = pf->nvm_phy_type_hi & in ice_init_phy_cfg_dflt_override()
2012 cpu_to_le64(ldo->phy_type_high); in ice_init_phy_cfg_dflt_override()
2014 cfg->link_fec_opt = ldo->fec_options; in ice_init_phy_cfg_dflt_override()
2015 phy->curr_user_fec_req = ICE_FEC_AUTO; in ice_init_phy_cfg_dflt_override()
2017 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); in ice_init_phy_cfg_dflt_override()
2021 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2037 struct ice_phy_info *phy = &pi->phy; in ice_init_phy_user_cfg()
2038 struct ice_pf *pf = pi->hw->back; in ice_init_phy_user_cfg()
2041 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) in ice_init_phy_user_cfg()
2042 return -EIO; in ice_init_phy_user_cfg()
2046 return -ENOMEM; in ice_init_phy_user_cfg()
2048 if (ice_fw_supports_report_dflt_cfg(pi->hw)) in ice_init_phy_user_cfg()
2059 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); in ice_init_phy_user_cfg()
2062 if (ice_fw_supports_link_override(pi->hw) && in ice_init_phy_user_cfg()
2063 !(pcaps->module_compliance_enforcement & in ice_init_phy_user_cfg()
2065 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); in ice_init_phy_user_cfg()
2071 if (!ice_fw_supports_report_dflt_cfg(pi->hw) && in ice_init_phy_user_cfg()
2072 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { in ice_init_phy_user_cfg()
2081 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, in ice_init_phy_user_cfg()
2082 pcaps->link_fec_options); in ice_init_phy_user_cfg()
2083 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); in ice_init_phy_user_cfg()
2086 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; in ice_init_phy_user_cfg()
2087 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); in ice_init_phy_user_cfg()
2094 * ice_configure_phy - configure PHY
2103 struct device *dev = ice_pf_to_dev(vsi->back); in ice_configure_phy()
2104 struct ice_port_info *pi = vsi->port_info; in ice_configure_phy()
2107 struct ice_phy_info *phy = &pi->phy; in ice_configure_phy()
2108 struct ice_pf *pf = vsi->back; in ice_configure_phy()
2112 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) in ice_configure_phy()
2113 return -EPERM; in ice_configure_phy()
2117 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && in ice_configure_phy()
2118 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) in ice_configure_phy()
2119 return -EPERM; in ice_configure_phy()
2121 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) in ice_configure_phy()
2126 return -ENOMEM; in ice_configure_phy()
2133 vsi->vsi_num, err); in ice_configure_phy()
2140 if (pcaps->caps & ICE_AQC_PHY_EN_LINK && in ice_configure_phy()
2141 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) in ice_configure_phy()
2146 if (ice_fw_supports_report_dflt_cfg(pi->hw)) in ice_configure_phy()
2154 vsi->vsi_num, err); in ice_configure_phy()
2160 err = -ENOMEM; in ice_configure_phy()
2166 /* Speed - If default override pending, use curr_user_phy_cfg set in in ice_configure_phy()
2170 vsi->back->state)) { in ice_configure_phy()
2171 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; in ice_configure_phy()
2172 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; in ice_configure_phy()
2177 pi->phy.curr_user_speed_req); in ice_configure_phy()
2178 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); in ice_configure_phy()
2179 cfg->phy_type_high = pcaps->phy_type_high & in ice_configure_phy()
2184 if (!cfg->phy_type_low && !cfg->phy_type_high) { in ice_configure_phy()
2185 cfg->phy_type_low = pcaps->phy_type_low; in ice_configure_phy()
2186 cfg->phy_type_high = pcaps->phy_type_high; in ice_configure_phy()
2190 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); in ice_configure_phy()
2193 if (cfg->link_fec_opt != in ice_configure_phy()
2194 (cfg->link_fec_opt & pcaps->link_fec_options)) { in ice_configure_phy()
2195 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; in ice_configure_phy()
2196 cfg->link_fec_opt = pcaps->link_fec_options; in ice_configure_phy()
2199 /* Flow Control - always supported; no need to check against in ice_configure_phy()
2202 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); in ice_configure_phy()
2205 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; in ice_configure_phy()
2207 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); in ice_configure_phy()
2210 vsi->vsi_num, err); in ice_configure_phy()
2219 * ice_check_media_subtask - Check for media
2232 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) in ice_check_media_subtask()
2240 pi = vsi->port_info; in ice_check_media_subtask()
2245 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_check_media_subtask()
2247 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { in ice_check_media_subtask()
2248 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) in ice_check_media_subtask()
2254 if (test_bit(ICE_VSI_DOWN, vsi->state) && in ice_check_media_subtask()
2255 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) in ice_check_media_subtask()
2260 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_check_media_subtask()
2269 * ice_service_task - manage and run subtasks
2283 if (ice_is_reset_in_progress(pf->state) || in ice_service_task()
2284 test_bit(ICE_SUSPENDED, pf->state) || in ice_service_task()
2285 test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_service_task()
2290 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { in ice_service_task()
2295 set_bit(IIDC_EVENT_CRIT_ERR, event->type); in ice_service_task()
2297 swap(event->reg, pf->oicr_err_reg); in ice_service_task()
2303 if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { in ice_service_task()
2312 if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) in ice_service_task()
2316 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { in ice_service_task()
2321 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); in ice_service_task()
2352 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || in ice_service_task()
2353 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || in ice_service_task()
2354 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_service_task()
2355 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || in ice_service_task()
2356 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || in ice_service_task()
2357 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || in ice_service_task()
2358 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) in ice_service_task()
2359 mod_timer(&pf->serv_tmr, jiffies); in ice_service_task()
2363 * ice_set_ctrlq_len - helper function to set controlq length
2368 hw->adminq.num_rq_entries = ICE_AQ_LEN; in ice_set_ctrlq_len()
2369 hw->adminq.num_sq_entries = ICE_AQ_LEN; in ice_set_ctrlq_len()
2370 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2371 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2372 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; in ice_set_ctrlq_len()
2373 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; in ice_set_ctrlq_len()
2374 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2375 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2376 hw->sbq.num_rq_entries = ICE_SBQ_LEN; in ice_set_ctrlq_len()
2377 hw->sbq.num_sq_entries = ICE_SBQ_LEN; in ice_set_ctrlq_len()
2378 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2379 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; in ice_set_ctrlq_len()
2383 * ice_schedule_reset - schedule a reset
2392 if (test_bit(ICE_RESET_FAILED, pf->state)) { in ice_schedule_reset()
2394 return -EIO; in ice_schedule_reset()
2397 if (ice_is_reset_in_progress(pf->state)) { in ice_schedule_reset()
2399 return -EBUSY; in ice_schedule_reset()
2404 set_bit(ICE_PFR_REQ, pf->state); in ice_schedule_reset()
2407 set_bit(ICE_CORER_REQ, pf->state); in ice_schedule_reset()
2410 set_bit(ICE_GLOBR_REQ, pf->state); in ice_schedule_reset()
2413 return -EINVAL; in ice_schedule_reset()
2421 * ice_irq_affinity_notify - Callback for affinity changes
2435 cpumask_copy(&q_vector->affinity_mask, mask); in ice_irq_affinity_notify()
2439 * ice_irq_affinity_release - Callback for affinity notifier release
2449 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2454 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_ena_irq()
2458 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); in ice_vsi_ena_irq()
2465 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2471 int q_vectors = vsi->num_q_vectors; in ice_vsi_req_irq_msix()
2472 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix()
2473 int base = vsi->base_vector; in ice_vsi_req_irq_msix()
2482 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; in ice_vsi_req_irq_msix()
2484 irq_num = pf->msix_entries[base + vector].vector; in ice_vsi_req_irq_msix()
2486 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { in ice_vsi_req_irq_msix()
2487 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2488 "%s-%s-%d", basename, "TxRx", rx_int_idx++); in ice_vsi_req_irq_msix()
2490 } else if (q_vector->rx.rx_ring) { in ice_vsi_req_irq_msix()
2491 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2492 "%s-%s-%d", basename, "rx", rx_int_idx++); in ice_vsi_req_irq_msix()
2493 } else if (q_vector->tx.tx_ring) { in ice_vsi_req_irq_msix()
2494 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in ice_vsi_req_irq_msix()
2495 "%s-%s-%d", basename, "tx", tx_int_idx++); in ice_vsi_req_irq_msix()
2500 if (vsi->type == ICE_VSI_CTRL && vsi->vf) in ice_vsi_req_irq_msix()
2501 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2502 IRQF_SHARED, q_vector->name, in ice_vsi_req_irq_msix()
2505 err = devm_request_irq(dev, irq_num, vsi->irq_handler, in ice_vsi_req_irq_msix()
2506 0, q_vector->name, q_vector); in ice_vsi_req_irq_msix()
2508 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", in ice_vsi_req_irq_msix()
2517 affinity_notify = &q_vector->affinity_notify; in ice_vsi_req_irq_msix()
2518 affinity_notify->notify = ice_irq_affinity_notify; in ice_vsi_req_irq_msix()
2519 affinity_notify->release = ice_irq_affinity_release; in ice_vsi_req_irq_msix()
2524 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); in ice_vsi_req_irq_msix()
2529 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", in ice_vsi_req_irq_msix()
2530 vsi->vsi_num, ERR_PTR(err)); in ice_vsi_req_irq_msix()
2534 vsi->irqs_ready = true; in ice_vsi_req_irq_msix()
2539 vector--; in ice_vsi_req_irq_msix()
2540 irq_num = pf->msix_entries[base + vector].vector; in ice_vsi_req_irq_msix()
2544 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); in ice_vsi_req_irq_msix()
2550 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2557 struct device *dev = ice_pf_to_dev(vsi->back); in ice_xdp_alloc_setup_rings()
2562 u16 xdp_q_idx = vsi->alloc_txq + i; in ice_xdp_alloc_setup_rings()
2570 xdp_ring->q_index = xdp_q_idx; in ice_xdp_alloc_setup_rings()
2571 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; in ice_xdp_alloc_setup_rings()
2572 xdp_ring->vsi = vsi; in ice_xdp_alloc_setup_rings()
2573 xdp_ring->netdev = NULL; in ice_xdp_alloc_setup_rings()
2574 xdp_ring->dev = dev; in ice_xdp_alloc_setup_rings()
2575 xdp_ring->count = vsi->num_tx_desc; in ice_xdp_alloc_setup_rings()
2576 xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1; in ice_xdp_alloc_setup_rings()
2577 xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1; in ice_xdp_alloc_setup_rings()
2578 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); in ice_xdp_alloc_setup_rings()
2582 spin_lock_init(&xdp_ring->tx_lock); in ice_xdp_alloc_setup_rings()
2583 for (j = 0; j < xdp_ring->count; j++) { in ice_xdp_alloc_setup_rings()
2585 tx_desc->cmd_type_offset_bsz = 0; in ice_xdp_alloc_setup_rings()
2592 for (; i >= 0; i--) in ice_xdp_alloc_setup_rings()
2593 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in ice_xdp_alloc_setup_rings()
2594 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_xdp_alloc_setup_rings()
2595 return -ENOMEM; in ice_xdp_alloc_setup_rings()
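The error path above follows the usual partial-allocation unwind: free, in reverse order, only the rings that were fully set up (hence the ->desc check). A generic sketch of the idiom with assumed helpers:

static int setup_all_rings(struct ring **rings, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (setup_one_ring(rings[i]))	/* assumed: 0 on success */
			goto unwind;
	return 0;

unwind:
	while (--i >= 0)
		teardown_one_ring(rings[i]);
	return -ENOMEM;
}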
2599 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2608 old_prog = xchg(&vsi->xdp_prog, prog); in ice_vsi_assign_bpf_prog()
2613 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in ice_vsi_assign_bpf_prog()
2617 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2626 int xdp_rings_rem = vsi->num_xdp_txq; in ice_prepare_xdp_rings()
2627 struct ice_pf *pf = vsi->back; in ice_prepare_xdp_rings()
2629 .qs_mutex = &pf->avail_q_mutex, in ice_prepare_xdp_rings()
2630 .pf_map = pf->avail_txqs, in ice_prepare_xdp_rings()
2631 .pf_map_size = pf->max_pf_txqs, in ice_prepare_xdp_rings()
2632 .q_count = vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2634 .vsi_map = vsi->txq_map, in ice_prepare_xdp_rings()
2635 .vsi_map_offset = vsi->alloc_txq, in ice_prepare_xdp_rings()
2643 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2644 sizeof(*vsi->xdp_rings), GFP_KERNEL); in ice_prepare_xdp_rings()
2645 if (!vsi->xdp_rings) in ice_prepare_xdp_rings()
2646 return -ENOMEM; in ice_prepare_xdp_rings()
2648 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; in ice_prepare_xdp_rings()
2653 netdev_warn(vsi->netdev, in ice_prepare_xdp_rings()
2661 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_prepare_xdp_rings()
2665 vsi->num_q_vectors - v_idx); in ice_prepare_xdp_rings()
2666 q_base = vsi->num_xdp_txq - xdp_rings_rem; in ice_prepare_xdp_rings()
2669 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; in ice_prepare_xdp_rings()
2671 xdp_ring->q_vector = q_vector; in ice_prepare_xdp_rings()
2672 xdp_ring->next = q_vector->tx.tx_ring; in ice_prepare_xdp_rings()
2673 q_vector->tx.tx_ring = xdp_ring; in ice_prepare_xdp_rings()
2675 xdp_rings_rem -= xdp_rings_per_v; in ice_prepare_xdp_rings()
2680 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; in ice_prepare_xdp_rings()
2682 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; in ice_prepare_xdp_rings()
2685 ice_for_each_tx_ring(ring, q_vector->tx) { in ice_prepare_xdp_rings()
2687 vsi->rx_rings[i]->xdp_ring = ring; in ice_prepare_xdp_rings()
2699 if (ice_is_reset_in_progress(pf->state)) in ice_prepare_xdp_rings()
2705 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_prepare_xdp_rings()
2706 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; in ice_prepare_xdp_rings()
2708 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_prepare_xdp_rings()
2717 * this flow is a subject of both ethtool -L and ndo_bpf flows; in ice_prepare_xdp_rings()
2718 * VSI rebuild that happens under ethtool -L can expose us to in ice_prepare_xdp_rings()
2720 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put in ice_prepare_xdp_rings()
2731 if (vsi->xdp_rings[i]) { in ice_prepare_xdp_rings()
2732 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_prepare_xdp_rings()
2733 vsi->xdp_rings[i] = NULL; in ice_prepare_xdp_rings()
2737 mutex_lock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2739 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_prepare_xdp_rings()
2740 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_prepare_xdp_rings()
2742 mutex_unlock(&pf->avail_q_mutex); in ice_prepare_xdp_rings()
2744 devm_kfree(dev, vsi->xdp_rings); in ice_prepare_xdp_rings()
2745 return -ENOMEM; in ice_prepare_xdp_rings()
2749 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2758 struct ice_pf *pf = vsi->back; in ice_destroy_xdp_rings()
2763 * in pf->state won't be set, so additionally check first q_vector in ice_destroy_xdp_rings()
2766 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) in ice_destroy_xdp_rings()
2770 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; in ice_destroy_xdp_rings()
2773 ice_for_each_tx_ring(ring, q_vector->tx) in ice_destroy_xdp_rings()
2774 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) in ice_destroy_xdp_rings()
2778 q_vector->tx.tx_ring = ring; in ice_destroy_xdp_rings()
2782 mutex_lock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2784 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); in ice_destroy_xdp_rings()
2785 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; in ice_destroy_xdp_rings()
2787 mutex_unlock(&pf->avail_q_mutex); in ice_destroy_xdp_rings()
2790 if (vsi->xdp_rings[i]) { in ice_destroy_xdp_rings()
2791 if (vsi->xdp_rings[i]->desc) { in ice_destroy_xdp_rings()
2793 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_destroy_xdp_rings()
2795 kfree_rcu(vsi->xdp_rings[i], rcu); in ice_destroy_xdp_rings()
2796 vsi->xdp_rings[i] = NULL; in ice_destroy_xdp_rings()
2799 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); in ice_destroy_xdp_rings()
2800 vsi->xdp_rings = NULL; in ice_destroy_xdp_rings()
2805 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) in ice_destroy_xdp_rings()
2813 for (i = 0; i < vsi->tc_cfg.numtc; i++) in ice_destroy_xdp_rings()
2814 max_txqs[i] = vsi->num_txq; in ice_destroy_xdp_rings()
2817 vsi->num_xdp_txq = 0; in ice_destroy_xdp_rings()
2819 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_destroy_xdp_rings()
2824 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2832 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; in ice_vsi_rx_napi_schedule()
2834 if (rx_ring->xsk_pool) in ice_vsi_rx_napi_schedule()
2835 napi_schedule(&rx_ring->q_vector->napi); in ice_vsi_rx_napi_schedule()
2840 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2844 * -ENOMEM otherwise
2848 u16 avail = ice_get_avail_txq_count(vsi->back); in ice_vsi_determine_xdp_res()
2852 return -ENOMEM; in ice_vsi_determine_xdp_res()
2854 vsi->num_xdp_txq = min_t(u16, avail, cpus); in ice_vsi_determine_xdp_res()
2856 if (vsi->num_xdp_txq < cpus) in ice_vsi_determine_xdp_res()
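The sizing rule above in one line: aim for one XDP Tx queue per CPU so each core gets a lockless ring, but settle for what the PF has left; when the result is short of the CPU count, rings are shared and serialized with the tx_lock initialized in ice_xdp_alloc_setup_rings() (line 2582). A sketch, names assumed:

static u16 xdp_txq_count(struct ice_pf *pf)
{
	u16 avail = ice_get_avail_txq_count(pf);
	u16 cpus = num_possible_cpus();

	return min_t(u16, avail, cpus);
}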
2863 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2872 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; in ice_xdp_setup_prog()
2873 bool if_running = netif_running(vsi->netdev); in ice_xdp_setup_prog()
2876 if (frame_size > vsi->rx_buf_len) { in ice_xdp_setup_prog()
2878 return -EOPNOTSUPP; in ice_xdp_setup_prog()
2882 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { in ice_xdp_setup_prog()
2899 /* reallocate Rx queues that are used for zero-copy */ in ice_xdp_setup_prog()
2907 /* reallocate Rx queues that were used for zero-copy */ in ice_xdp_setup_prog()
2912 /* safe to call even when prog == vsi->xdp_prog as in ice_xdp_setup_prog()
2926 return (ret || xdp_ring_err) ? -ENOMEM : 0; in ice_xdp_setup_prog()
2930 * ice_xdp_safe_mode - XDP handler for safe mode
2937 NL_SET_ERR_MSG_MOD(xdp->extack, in ice_xdp_safe_mode()
2940 return -EOPNOTSUPP; in ice_xdp_safe_mode()
2944 * ice_xdp - implements XDP handler
2951 struct ice_vsi *vsi = np->vsi; in ice_xdp()
2953 if (vsi->type != ICE_VSI_PF) { in ice_xdp()
2954 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); in ice_xdp()
2955 return -EINVAL; in ice_xdp()
2958 switch (xdp->command) { in ice_xdp()
2960 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); in ice_xdp()
2962 return ice_xsk_pool_setup(vsi, xdp->xsk.pool, in ice_xdp()
2963 xdp->xsk.queue_id); in ice_xdp()
2965 return -EINVAL; in ice_xdp()
2970 * ice_ena_misc_vector - enable the non-queue interrupts
2975 struct ice_hw *hw = &pf->hw; in ice_ena_misc_vector()
2978 /* Disable anti-spoof detection interrupt to prevent spurious event in ice_ena_misc_vector()
2979 * interrupts during a function reset. Anti-spoof functionality is in ice_ena_misc_vector()
3002 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), in ice_ena_misc_vector()
3007 * ice_misc_intr - misc interrupt handler
3014 struct ice_hw *hw = &pf->hw; in ice_misc_intr()
3020 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3021 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3022 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); in ice_misc_intr()
3029 pf->sw_int_count++; in ice_misc_intr()
3034 set_bit(ICE_MDD_EVENT_PENDING, pf->state); in ice_misc_intr()
3038 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { in ice_misc_intr()
3045 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); in ice_misc_intr()
3058 pf->corer_count++; in ice_misc_intr()
3060 pf->globr_count++; in ice_misc_intr()
3062 pf->empr_count++; in ice_misc_intr()
3067 * pf->state so that the service task can start a reset/rebuild. in ice_misc_intr()
3069 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { in ice_misc_intr()
3071 set_bit(ICE_CORER_RECV, pf->state); in ice_misc_intr()
3073 set_bit(ICE_GLOBR_RECV, pf->state); in ice_misc_intr()
3075 set_bit(ICE_EMPR_RECV, pf->state); in ice_misc_intr()
3078 * hw->reset_ongoing indicates whether the hardware is in ice_misc_intr()
3083 * ICE_RESET_OICR_RECV in pf->state indicates in ice_misc_intr()
3090 hw->reset_ongoing = true; in ice_misc_intr()
3096 if (!hw->reset_ongoing) in ice_misc_intr()
3101 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; in ice_misc_intr()
3105 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | in ice_misc_intr()
3109 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); in ice_misc_intr()
3114 pf->oicr_err_reg |= oicr; in ice_misc_intr()
3115 set_bit(ICE_AUX_ERR_PENDING, pf->state); in ice_misc_intr()
3128 set_bit(ICE_PFR_REQ, pf->state); in ice_misc_intr()
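The hard handler above does the classic minimal top half: read the cause register, latch state bits for the service task/IRQ thread, and defer everything heavy. A generic sketch under assumed names and register layout:

struct my_pf {
	void __iomem *hw_addr;
	DECLARE_BITMAP(state, MY_STATE_NBITS);
};

static irqreturn_t my_misc_intr(int irq, void *data)
{
	struct my_pf *pf = data;
	u32 cause = readl(pf->hw_addr + MY_OICR);	/* assumed offset */

	if (cause & MY_OICR_ADMINQ)
		set_bit(MY_ADMINQ_EVENT_PENDING, pf->state);
	if (cause & MY_OICR_MDD)
		set_bit(MY_MDD_EVENT_PENDING, pf->state);

	my_service_task_schedule(pf);	/* assumed: defers the real work */
	return IRQ_HANDLED;
}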
3142 * ice_misc_intr_thread_fn - misc interrupt thread function
3150 if (ice_is_reset_in_progress(pf->state)) in ice_misc_intr_thread_fn()
3160 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3184 * ice_free_irq_msix_misc - Unroll misc vector setup
3189 struct ice_hw *hw = &pf->hw; in ice_free_irq_msix_misc()
3197 if (pf->msix_entries) { in ice_free_irq_msix_misc()
3198 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); in ice_free_irq_msix_misc()
3200 pf->msix_entries[pf->oicr_idx].vector, pf); in ice_free_irq_msix_misc()
3203 pf->num_avail_sw_msix += 1; in ice_free_irq_msix_misc()
3204 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); in ice_free_irq_msix_misc()
3208 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3239 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3243 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3249 struct ice_hw *hw = &pf->hw; in ice_req_irq_msix_misc()
3252 if (!pf->int_name[0]) in ice_req_irq_msix_misc()
3253 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", in ice_req_irq_msix_misc()
3260 if (ice_is_reset_in_progress(pf->state)) in ice_req_irq_msix_misc()
3264 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); in ice_req_irq_msix_misc()
3268 pf->num_avail_sw_msix -= 1; in ice_req_irq_msix_misc()
3269 pf->oicr_idx = (u16)oicr_idx; in ice_req_irq_msix_misc()
3272 pf->msix_entries[pf->oicr_idx].vector, in ice_req_irq_msix_misc()
3274 0, pf->int_name, pf); in ice_req_irq_msix_misc()
3277 pf->int_name, err); in ice_req_irq_msix_misc()
3278 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); in ice_req_irq_msix_misc()
3279 pf->num_avail_sw_msix += 1; in ice_req_irq_msix_misc()
3286 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); in ice_req_irq_msix_misc()
3287 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), in ice_req_irq_msix_misc()
3297 * ice_napi_add - register NAPI handler for the VSI
3308 if (!vsi->netdev) in ice_napi_add()
3312 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, in ice_napi_add()
3317 * ice_set_ops - set netdev and ethtools ops for the given netdev
3325 netdev->netdev_ops = &ice_netdev_safe_mode_ops; in ice_set_ops()
3330 netdev->netdev_ops = &ice_netdev_ops; in ice_set_ops()
3331 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; in ice_set_ops()
3336 * ice_set_netdev_features - set features for the given netdev
3342 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); in ice_set_netdev_features()
3350 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; in ice_set_netdev_features()
3351 netdev->hw_features = netdev->features; in ice_set_netdev_features()
3385 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | in ice_set_netdev_features()
3388 netdev->hw_features = dflt_features | csumo_features | in ice_set_netdev_features()
3392 netdev->mpls_features = NETIF_F_HW_CSUM | in ice_set_netdev_features()
3397 netdev->features |= netdev->hw_features; in ice_set_netdev_features()
3399 netdev->hw_features |= NETIF_F_HW_TC; in ice_set_netdev_features()
3400 netdev->hw_features |= NETIF_F_LOOPBACK; in ice_set_netdev_features()
3403 netdev->hw_enc_features |= dflt_features | csumo_features | in ice_set_netdev_features()
3405 netdev->vlan_features |= dflt_features | csumo_features | in ice_set_netdev_features()
3414 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | in ice_set_netdev_features()
3420 netdev->hw_features |= NETIF_F_RXFCS; in ice_set_netdev_features()
3424 * ice_cfg_netdev - Allocate, configure and register a netdev
3435 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, in ice_cfg_netdev()
3436 vsi->alloc_rxq); in ice_cfg_netdev()
3438 return -ENOMEM; in ice_cfg_netdev()
3440 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_cfg_netdev()
3441 vsi->netdev = netdev; in ice_cfg_netdev()
3443 np->vsi = vsi; in ice_cfg_netdev()
3449 if (vsi->type == ICE_VSI_PF) { in ice_cfg_netdev()
3450 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); in ice_cfg_netdev()
3451 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_cfg_netdev()
3453 ether_addr_copy(netdev->perm_addr, mac_addr); in ice_cfg_netdev()
3456 netdev->priv_flags |= IFF_UNICAST_FLT; in ice_cfg_netdev()
3459 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_cfg_netdev()
3461 /* setup watchdog timeout value to be 5 seconds */ in ice_cfg_netdev()
3462 netdev->watchdog_timeo = 5 * HZ; in ice_cfg_netdev()
3464 netdev->min_mtu = ETH_MIN_MTU; in ice_cfg_netdev()
3465 netdev->max_mtu = ICE_MAX_MTU; in ice_cfg_netdev()
3471 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3485 * ice_pf_vsi_setup - Set up a PF VSI
3506 * ice_ctrl_vsi_setup - Set up a control VSI
3520 * ice_lb_vsi_setup - Set up a loopback VSI
3534 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3546 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_add_vid()
3554 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_add_vid()
3558 * all-multicast is currently enabled. in ice_vlan_rx_add_vid()
3560 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_add_vid()
3561 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3574 ret = vlan_ops->add_vlan(vsi, &vlan); in ice_vlan_rx_add_vid()
3578 /* If all-multicast is currently enabled and this VLAN ID is only one in ice_vlan_rx_add_vid()
3579 * besides VLAN-0 we have to update look-up type of multicast promisc in ice_vlan_rx_add_vid()
3580 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. in ice_vlan_rx_add_vid()
3582 if ((vsi->current_netdev_flags & IFF_ALLMULTI) && in ice_vlan_rx_add_vid()
3584 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3586 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3591 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_add_vid()
3597 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3609 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_kill_vid()
3617 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_kill_vid()
3620 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3624 vsi->vsi_num); in ice_vlan_rx_kill_vid()
3625 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vlan_rx_kill_vid()
3634 ret = vlan_ops->del_vlan(vsi, &vlan); in ice_vlan_rx_kill_vid()
3639 * all-multicast is enabled. in ice_vlan_rx_kill_vid()
3641 if (vsi->current_netdev_flags & IFF_ALLMULTI) in ice_vlan_rx_kill_vid()
3642 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3646 /* Update look-up type of multicast promisc rule for VLAN 0 in ice_vlan_rx_kill_vid()
3648 * all-multicast is enabled and VLAN 0 is the only VLAN rule. in ice_vlan_rx_kill_vid()
3650 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_kill_vid()
3651 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3654 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3660 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_kill_vid()
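Taken together, the add/kill paths above keep the multicast promiscuous rule's look-up type in step with whether any non-zero VLAN filter exists. A summary of the transitions, assuming IFF_ALLMULTI stays set throughout (editorial note, not driver code):

/*
 *   filters before        event        multicast promisc rule after
 *   VLAN 0 only           add VID n    ICE_SW_LKUP_PROMISC -> ICE_SW_LKUP_PROMISC_VLAN
 *   VLAN 0 + VID n only   kill VID n   ICE_SW_LKUP_PROMISC_VLAN -> ICE_SW_LKUP_PROMISC
 */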
3673 list_del(&indr_priv->list); in ice_rep_indr_tc_block_unbind()
3678 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3683 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); in ice_tc_indir_block_unregister()
3690 * ice_tc_indir_block_remove - clean indirect TC block notifications
3704 * ice_tc_indir_block_register - Register TC indirect block notifications
3713 if (!vsi || !vsi->netdev) in ice_tc_indir_block_register()
3714 return -EINVAL; in ice_tc_indir_block_register()
3716 np = netdev_priv(vsi->netdev); in ice_tc_indir_block_register()
3718 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); in ice_tc_indir_block_register()
3723 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3731 bool dvm = ice_is_dvm_ena(&pf->hw); in ice_setup_pf_sw()
3735 if (ice_is_reset_in_progress(pf->state)) in ice_setup_pf_sw()
3736 return -EBUSY; in ice_setup_pf_sw()
3738 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_setup_pf_sw()
3740 return -EIO; in ice_setup_pf_sw()
3742 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_setup_pf_sw()
3744 return -ENOMEM; in ice_setup_pf_sw()
3747 INIT_LIST_HEAD(&vsi->ch_list); in ice_setup_pf_sw()
3782 if (vsi->netdev) { in ice_setup_pf_sw()
3783 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_setup_pf_sw()
3784 free_netdev(vsi->netdev); in ice_setup_pf_sw()
3785 vsi->netdev = NULL; in ice_setup_pf_sw()
3795 * ice_get_avail_q_count - Get count of available queues
3815 * ice_get_avail_txq_count - Get count of available Tx queues
3820 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, in ice_get_avail_txq_count()
3821 pf->max_pf_txqs); in ice_get_avail_txq_count()
3825 * ice_get_avail_rxq_count - Get count of available Rx queues
3830 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, in ice_get_avail_rxq_count()
3831 pf->max_pf_rxqs); in ice_get_avail_rxq_count()
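The shared helper's body is elided above; judging by its name and callers, it walks the queue bitmap under the mutex and counts clear bits (a set bit marks a queue in use). A self-contained user-space sketch of that counting, under that assumption:

#include <stdio.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

/* clear bit == queue available; set bit == queue in use */
static unsigned int avail_q_count(const unsigned long *qmap, unsigned int size)
{
	unsigned int i, count = 0;

	for (i = 0; i < size; i++)
		if (!(qmap[i / BITS_PER_WORD] & (1ul << (i % BITS_PER_WORD))))
			count++;
	return count;
}

int main(void)
{
	unsigned long qmap[1] = { 0xF0ul };	/* queues 4..7 in use */

	printf("%u of 16 queues available\n", avail_q_count(qmap, 16)); /* 12 */
	return 0;
}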
3835 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3841 mutex_destroy(&pf->adev_mutex); in ice_deinit_pf()
3842 mutex_destroy(&pf->sw_mutex); in ice_deinit_pf()
3843 mutex_destroy(&pf->tc_mutex); in ice_deinit_pf()
3844 mutex_destroy(&pf->avail_q_mutex); in ice_deinit_pf()
3845 mutex_destroy(&pf->vfs.table_lock); in ice_deinit_pf()
3847 if (pf->avail_txqs) { in ice_deinit_pf()
3848 bitmap_free(pf->avail_txqs); in ice_deinit_pf()
3849 pf->avail_txqs = NULL; in ice_deinit_pf()
3852 if (pf->avail_rxqs) { in ice_deinit_pf()
3853 bitmap_free(pf->avail_rxqs); in ice_deinit_pf()
3854 pf->avail_rxqs = NULL; in ice_deinit_pf()
3857 if (pf->ptp.clock) in ice_deinit_pf()
3858 ptp_clock_unregister(pf->ptp.clock); in ice_deinit_pf()
3862 * ice_set_pf_caps - set PF's capability flags
3867 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; in ice_set_pf_caps()
3869 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3870 if (func_caps->common_cap.rdma) in ice_set_pf_caps()
3871 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
3872 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3873 if (func_caps->common_cap.dcb) in ice_set_pf_caps()
3874 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
3875 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
3876 if (func_caps->common_cap.sr_iov_1_1) { in ice_set_pf_caps()
3877 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
3878 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, in ice_set_pf_caps()
3881 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
3882 if (func_caps->common_cap.rss_table_size) in ice_set_pf_caps()
3883 set_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
3885 clear_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
3886 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { in ice_set_pf_caps()
3892 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_set_pf_caps()
3893 set_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
3895 ice_alloc_fd_guar_item(&pf->hw, &unused, in ice_set_pf_caps()
3896 func_caps->fd_fltr_guar); in ice_set_pf_caps()
3898 ice_alloc_fd_shrd_item(&pf->hw, &unused, in ice_set_pf_caps()
3899 func_caps->fd_fltr_best_effort); in ice_set_pf_caps()
3902 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
3903 if (func_caps->common_cap.ieee_1588) in ice_set_pf_caps()
3904 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
3906 pf->max_pf_txqs = func_caps->common_cap.num_txq; in ice_set_pf_caps()
3907 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; in ice_set_pf_caps()
3911 * ice_init_pf - Initialize general software structures (struct ice_pf)
3918 mutex_init(&pf->sw_mutex); in ice_init_pf()
3919 mutex_init(&pf->tc_mutex); in ice_init_pf()
3920 mutex_init(&pf->adev_mutex); in ice_init_pf()
3922 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_init_pf()
3923 spin_lock_init(&pf->aq_wait_lock); in ice_init_pf()
3924 init_waitqueue_head(&pf->aq_wait_queue); in ice_init_pf()
3926 init_waitqueue_head(&pf->reset_wait_queue); in ice_init_pf()
3929 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_init_pf()
3930 pf->serv_tmr_period = HZ; in ice_init_pf()
3931 INIT_WORK(&pf->serv_task, ice_service_task); in ice_init_pf()
3932 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_init_pf()
3934 mutex_init(&pf->avail_q_mutex); in ice_init_pf()
3935 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
3936 if (!pf->avail_txqs) in ice_init_pf()
3937 return -ENOMEM; in ice_init_pf()
3939 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); in ice_init_pf()
3940 if (!pf->avail_rxqs) { in ice_init_pf()
3941 bitmap_free(pf->avail_txqs); in ice_init_pf()
3942 pf->avail_txqs = NULL; in ice_init_pf()
3943 return -ENOMEM; in ice_init_pf()
3946 mutex_init(&pf->vfs.table_lock); in ice_init_pf()
3947 hash_init(pf->vfs.table); in ice_init_pf()
3953 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
3955 * @v_remain: number of remaining MSI-X vectors to be distributed
3957 * Reduce the usage of MSI-X vectors when the entire request cannot be fulfilled.
3958 * pf->num_lan_msix and pf->num_rdma_msix values are set based on the number of
3966 pf->num_lan_msix = v_remain; in ice_reduce_msix_usage()
3974 dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); in ice_reduce_msix_usage()
3975 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_reduce_msix_usage()
3977 pf->num_rdma_msix = 0; in ice_reduce_msix_usage()
3978 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; in ice_reduce_msix_usage()
3980 (v_remain - v_rdma < v_rdma)) { in ice_reduce_msix_usage()
3982 pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; in ice_reduce_msix_usage()
3983 pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; in ice_reduce_msix_usage()
3987 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + in ice_reduce_msix_usage()
3989 pf->num_lan_msix = v_remain - pf->num_rdma_msix; in ice_reduce_msix_usage()
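For the RDMA-capable fallback above, RDMA keeps its AEQ vectors and splits the remainder evenly with LAN. A worked example of the arithmetic (ICE_RDMA_NUM_AEQ_MSIX is taken as 4 here, its value in this era's ice.h; treat the numbers as illustrative):

#include <stdio.h>

#define RDMA_NUM_AEQ_MSIX 4	/* stand-in for ICE_RDMA_NUM_AEQ_MSIX */

int main(void)
{
	int v_remain = 18;	/* vectors left over for LAN + RDMA */
	int num_rdma = (v_remain - RDMA_NUM_AEQ_MSIX) / 2 + RDMA_NUM_AEQ_MSIX;
	int num_lan = v_remain - num_rdma;

	/* (18 - 4) / 2 + 4 = 11 vectors for RDMA, 7 for LAN */
	printf("rdma=%d lan=%d\n", num_rdma, num_lan);
	return 0;
}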
3994 * ice_ena_msix_range - Request a range of MSI-X vectors from the OS
4007 hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_ena_msix_range()
4014 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_ena_msix_range()
4023 pf->num_lan_msix = num_cpus; in ice_ena_msix_range()
4024 v_wanted += pf->num_lan_msix; in ice_ena_msix_range()
4028 pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; in ice_ena_msix_range()
4029 v_wanted += pf->num_rdma_msix; in ice_ena_msix_range()
4035 dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", in ice_ena_msix_range()
4039 err = -ERANGE; in ice_ena_msix_range()
4043 v_remain = hw_num_msix - v_other; in ice_ena_msix_range()
4045 v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; in ice_ena_msix_range()
4050 v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; in ice_ena_msix_range()
4052 dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", in ice_ena_msix_range()
4053 pf->num_lan_msix); in ice_ena_msix_range()
4055 dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", in ice_ena_msix_range()
4056 pf->num_rdma_msix); in ice_ena_msix_range()
4059 pf->msix_entries = devm_kcalloc(dev, v_wanted, in ice_ena_msix_range()
4060 sizeof(*pf->msix_entries), GFP_KERNEL); in ice_ena_msix_range()
4061 if (!pf->msix_entries) { in ice_ena_msix_range()
4062 err = -ENOMEM; in ice_ena_msix_range()
4067 pf->msix_entries[i].entry = i; in ice_ena_msix_range()
4070 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, in ice_ena_msix_range()
4073 dev_err(dev, "unable to reserve MSI-X vectors\n"); in ice_ena_msix_range()
4079 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", in ice_ena_msix_range()
4084 pci_disable_msix(pf->pdev); in ice_ena_msix_range()
4085 err = -ERANGE; in ice_ena_msix_range()
4088 int v_remain = v_actual - v_other; in ice_ena_msix_range()
4095 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", in ice_ena_msix_range()
4096 pf->num_lan_msix); in ice_ena_msix_range()
4099 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", in ice_ena_msix_range()
4100 pf->num_rdma_msix); in ice_ena_msix_range()
4107 devm_kfree(dev, pf->msix_entries); in ice_ena_msix_range()
4110 pf->num_rdma_msix = 0; in ice_ena_msix_range()
4111 pf->num_lan_msix = 0; in ice_ena_msix_range()
4116 * ice_dis_msix - Disable MSI-X interrupt setup in OS
4121 pci_disable_msix(pf->pdev); in ice_dis_msix()
4122 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); in ice_dis_msix()
4123 pf->msix_entries = NULL; in ice_dis_msix()
4127 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4134 if (pf->irq_tracker) { in ice_clear_interrupt_scheme()
4135 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); in ice_clear_interrupt_scheme()
4136 pf->irq_tracker = NULL; in ice_clear_interrupt_scheme()
4141 * ice_init_interrupt_scheme - Determine proper interrupt scheme
4154 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), in ice_init_interrupt_scheme()
4155 struct_size(pf->irq_tracker, list, vectors), in ice_init_interrupt_scheme()
4157 if (!pf->irq_tracker) { in ice_init_interrupt_scheme()
4159 return -ENOMEM; in ice_init_interrupt_scheme()
4163 pf->num_avail_sw_msix = (u16)vectors; in ice_init_interrupt_scheme()
4164 pf->irq_tracker->num_entries = (u16)vectors; in ice_init_interrupt_scheme()
4165 pf->irq_tracker->end = pf->irq_tracker->num_entries; in ice_init_interrupt_scheme()
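struct_size() above sizes an allocation for a struct ending in a flexible array member, with overflow checking. A minimal user-space sketch of the same allocation shape (the tracker layout here is illustrative, not the driver's exact struct):

#include <stdio.h>
#include <stdlib.h>

struct tracker {
	unsigned short num_entries;
	unsigned short end;
	unsigned short list[];	/* flexible array, one slot per vector */
};

int main(void)
{
	unsigned int vectors = 16;
	/* plain-C equivalent of struct_size(t, list, vectors), minus the
	 * kernel helper's overflow checks
	 */
	struct tracker *t = calloc(1, sizeof(*t) + vectors * sizeof(t->list[0]));

	if (!t)
		return 1;
	t->num_entries = vectors;
	t->end = t->num_entries;
	printf("tracker holds %u entries\n", (unsigned)t->num_entries);
	free(t);
	return 0;
}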
4171 * ice_is_wol_supported - check if WoL is supported
4187 return !(BIT(hw->port_info->lport) & wol_ctrl); in ice_is_wol_supported()
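The return above reads wol_ctrl as a per-port WoL-disable bitmap from NVM: WoL is supported when this port's bit is clear. A self-contained sketch of the same test (the lport and bitmap values are invented):

#include <stdbool.h>
#include <stdio.h>

static bool wol_supported(unsigned int lport, unsigned int wol_ctrl)
{
	/* a set bit in wol_ctrl means WoL is disabled for that logical port */
	return !((1u << lport) & wol_ctrl);
}

int main(void)
{
	unsigned int wol_ctrl = 0x5;	/* ports 0 and 2 have WoL disabled */

	printf("port1: %d\n", wol_supported(1, wol_ctrl)); /* 1 (supported) */
	printf("port2: %d\n", wol_supported(2, wol_ctrl)); /* 0 (disabled)  */
	return 0;
}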
4191 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4196 * Only change the number of queues if new_tx or new_rx is non-zero.
4202 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs()
4203 int err = 0, timeout = 50; in ice_vsi_recfg_qs() local
4206 return -EINVAL; in ice_vsi_recfg_qs()
4208 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_vsi_recfg_qs()
4209 timeout--; in ice_vsi_recfg_qs()
4210 if (!timeout) in ice_vsi_recfg_qs()
4211 return -EBUSY; in ice_vsi_recfg_qs()
4216 vsi->req_txq = (u16)new_tx; in ice_vsi_recfg_qs()
4218 vsi->req_rxq = (u16)new_rx; in ice_vsi_recfg_qs()
4221 if (!netif_running(vsi->netdev)) { in ice_vsi_recfg_qs()
4232 clear_bit(ICE_CFG_BUSY, pf->state); in ice_vsi_recfg_qs()
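The ICE_CFG_BUSY loop at the top of this function is the driver's usual claim-or-give-up pattern: try to atomically set the busy bit up to 50 times, sleeping between attempts (the usleep_range() call inside the loop is elided from this listing). A user-space sketch of the same shape with C11 atomics standing in for test_and_set_bit():

#include <stdatomic.h>
#include <unistd.h>

static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

/* Try to claim the config flag; give up with -EBUSY after 50 attempts. */
static int claim_cfg(void)
{
	int timeout = 50;

	while (atomic_flag_test_and_set(&cfg_busy)) {
		if (!--timeout)
			return -16;	/* -EBUSY */
		usleep(1000);		/* back off before retrying */
	}
	return 0;			/* caller must clear the flag */
}

int main(void)
{
	if (claim_cfg() == 0) {
		/* ... reconfigure under the lock ... */
		atomic_flag_clear(&cfg_busy);	/* clear_bit(ICE_CFG_BUSY, ...) */
	}
	return 0;
}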
4237 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4257 hw = &pf->hw; in ice_set_safe_mode_vlan_cfg()
4258 ctxt->info = vsi->info; in ice_set_safe_mode_vlan_cfg()
4260 ctxt->info.valid_sections = in ice_set_safe_mode_vlan_cfg()
4265 /* disable VLAN anti-spoof */ in ice_set_safe_mode_vlan_cfg()
4266 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << in ice_set_safe_mode_vlan_cfg()
4270 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_set_safe_mode_vlan_cfg()
4273 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | in ice_set_safe_mode_vlan_cfg()
4276 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_set_safe_mode_vlan_cfg()
4278 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", in ice_set_safe_mode_vlan_cfg()
4279 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_set_safe_mode_vlan_cfg()
4281 vsi->info.sec_flags = ctxt->info.sec_flags; in ice_set_safe_mode_vlan_cfg()
4282 vsi->info.sw_flags2 = ctxt->info.sw_flags2; in ice_set_safe_mode_vlan_cfg()
4283 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; in ice_set_safe_mode_vlan_cfg()
4290 * ice_log_pkg_init - log result of DDP package load
4296 struct ice_pf *pf = hw->back; in ice_log_pkg_init()
4304 hw->active_pkg_name, in ice_log_pkg_init()
4305 hw->active_pkg_ver.major, in ice_log_pkg_init()
4306 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4307 hw->active_pkg_ver.update, in ice_log_pkg_init()
4308 hw->active_pkg_ver.draft); in ice_log_pkg_init()
4312 hw->active_pkg_name, in ice_log_pkg_init()
4313 hw->active_pkg_ver.major, in ice_log_pkg_init()
4314 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4315 hw->active_pkg_ver.update, in ice_log_pkg_init()
4316 hw->active_pkg_ver.draft); in ice_log_pkg_init()
4320 hw->active_pkg_name, in ice_log_pkg_init()
4321 hw->active_pkg_ver.major, in ice_log_pkg_init()
4322 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4327 hw->active_pkg_name, in ice_log_pkg_init()
4328 hw->active_pkg_ver.major, in ice_log_pkg_init()
4329 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4330 hw->active_pkg_ver.update, in ice_log_pkg_init()
4331 hw->active_pkg_ver.draft, in ice_log_pkg_init()
4332 hw->pkg_name, in ice_log_pkg_init()
4333 hw->pkg_ver.major, in ice_log_pkg_init()
4334 hw->pkg_ver.minor, in ice_log_pkg_init()
4335 hw->pkg_ver.update, in ice_log_pkg_init()
4336 hw->pkg_ver.draft); in ice_log_pkg_init()
4371 * ice_load_pkg - load/reload the DDP Package file
4383 struct ice_hw *hw = &pf->hw; in ice_load_pkg()
4386 if (firmware && !hw->pkg_copy) { in ice_load_pkg()
4387 state = ice_copy_and_init_pkg(hw, firmware->data, in ice_load_pkg()
4388 firmware->size); in ice_load_pkg()
4390 } else if (!firmware && hw->pkg_copy) { in ice_load_pkg()
4392 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); in ice_load_pkg()
4400 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4404 /* Successfully downloading the package is the precondition for advanced in ice_load_pkg()
4407 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4411 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4420 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) in ice_verify_cacheline_size()
4426 * ice_send_version - update firmware with driver version
4441 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); in ice_send_version()
4445 * ice_init_fdir - Initialize flow director VSI and configuration
4459 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); in ice_init_fdir()
4462 return -ENOMEM; in ice_init_fdir()
4471 mutex_init(&pf->hw.fdir_fltr_lock); in ice_init_fdir()
4480 ice_fdir_release_flows(&pf->hw); in ice_init_fdir()
4484 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_init_fdir()
4485 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4486 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_init_fdir()
4492 * ice_get_opt_fw_name - return optional firmware file name or NULL
4498 * followed by an EUI-64 identifier (PCIe Device Serial Number) in ice_get_opt_fw_name()
4500 struct pci_dev *pdev = pf->pdev; in ice_get_opt_fw_name()
4515 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", in ice_get_opt_fw_name()
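The optional package name embeds the adapter's PCIe Device Serial Number, so one machine can ship per-device DDP files. A sketch of the formatting (the path prefix matches ICE_DDP_PKG_PATH as assumed for this era, and the DSN is made up):

#include <stdio.h>

int main(void)
{
	unsigned long long dsn = 0x1122334455667788ULL;	/* example DSN */
	char name[256];

	snprintf(name, sizeof(name), "%sice-%016llx.pkg",
		 "intel/ice/ddp/", dsn);
	printf("%s\n", name);	/* intel/ice/ddp/ice-1122334455667788.pkg */
	return 0;
}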
4522 * ice_request_fw - request the DDP package firmware file
4532 /* optional device-specific DDP (if present) overrides the default DDP in ice_request_fw()
4563 * ice_print_wake_reason - show the wake up cause in the log
4568 u32 wus = pf->wakeup_reason; in ice_print_wake_reason()
4590 * ice_register_netdev - register netdev and devlink port
4599 if (!vsi || !vsi->netdev) in ice_register_netdev()
4600 return -EIO; in ice_register_netdev()
4606 err = register_netdev(vsi->netdev); in ice_register_netdev()
4610 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_register_netdev()
4611 netif_carrier_off(vsi->netdev); in ice_register_netdev()
4612 netif_tx_stop_all_queues(vsi->netdev); in ice_register_netdev()
4614 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); in ice_register_netdev()
4620 free_netdev(vsi->netdev); in ice_register_netdev()
4621 vsi->netdev = NULL; in ice_register_netdev()
4622 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_register_netdev()
4627 * ice_probe - Device initialization routine
4636 struct device *dev = &pdev->dev; in ice_probe()
4641 if (pdev->is_virtfn) { in ice_probe()
4643 return -EINVAL; in ice_probe()
4647 * Documentation/driver-api/driver-model/devres.rst in ice_probe()
4661 return -ENOMEM; in ice_probe()
4664 pf->aux_idx = -1; in ice_probe()
4676 pf->pdev = pdev; in ice_probe()
4678 set_bit(ICE_DOWN, pf->state); in ice_probe()
4680 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
4682 hw = &pf->hw; in ice_probe()
4683 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; in ice_probe()
4686 hw->back = pf; in ice_probe()
4687 hw->vendor_id = pdev->vendor; in ice_probe()
4688 hw->device_id = pdev->device; in ice_probe()
4689 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); in ice_probe()
4690 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ice_probe()
4691 hw->subsystem_device_id = pdev->subsystem_device; in ice_probe()
4692 hw->bus.device = PCI_SLOT(pdev->devfn); in ice_probe()
4693 hw->bus.func = PCI_FUNC(pdev->devfn); in ice_probe()
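/* Editor's note: PCI_SLOT()/PCI_FUNC() simply split the 8-bit devfn into
 * a 5-bit slot and a 3-bit function. For an assumed devfn of 0x23:
 *
 *	PCI_SLOT(0x23) = (0x23 >> 3) & 0x1f = 4
 *	PCI_FUNC(0x23) =  0x23 & 0x07       = 3
 */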
4696 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); in ice_probe()
4699 if (debug < -1) in ice_probe()
4700 hw->debug_mask = debug; in ice_probe()
4706 err = -EIO; in ice_probe()
4715 * set in pf->state, which will cause ice_is_safe_mode to return in ice_probe()
4735 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; in ice_probe()
4736 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; in ice_probe()
4737 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in ice_probe()
4738 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; in ice_probe()
4740 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { in ice_probe()
4741 pf->hw.udp_tunnel_nic.tables[i].n_entries = in ice_probe()
4742 pf->hw.tnl.valid_count[TNL_VXLAN]; in ice_probe()
4743 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = in ice_probe()
4747 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { in ice_probe()
4748 pf->hw.udp_tunnel_nic.tables[i].n_entries = in ice_probe()
4749 pf->hw.tnl.valid_count[TNL_GENEVE]; in ice_probe()
4750 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = in ice_probe()
4755 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; in ice_probe()
4756 if (!pf->num_alloc_vsi) { in ice_probe()
4757 err = -EIO; in ice_probe()
4760 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in ice_probe()
4761 dev_warn(&pf->pdev->dev, in ice_probe()
4763 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in ice_probe()
4764 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in ice_probe()
4767 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_probe()
4769 if (!pf->vsi) { in ice_probe()
4770 err = -ENOMEM; in ice_probe()
4777 err = -EIO; in ice_probe()
4793 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); in ice_probe()
4794 if (!pf->first_sw) { in ice_probe()
4795 err = -ENOMEM; in ice_probe()
4799 if (hw->evb_veb) in ice_probe()
4800 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; in ice_probe()
4802 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; in ice_probe()
4804 pf->first_sw->pf = pf; in ice_probe()
4807 pf->first_sw->sw_id = hw->port_info->sw_id; in ice_probe()
4815 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
4826 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_probe()
4828 err = ice_init_link_events(pf->hw.port_info); in ice_probe()
4835 err = ice_init_nvm_phy_type(pf->hw.port_info); in ice_probe()
4840 err = ice_update_link_info(pf->hw.port_info); in ice_probe()
4844 ice_init_link_dflt_override(pf->hw.port_info); in ice_probe()
4847 pf->hw.port_info->phy.link_info.link_cfg_err); in ice_probe()
4850 if (pf->hw.port_info->phy.link_info.link_info & in ice_probe()
4853 err = ice_init_phy_user_cfg(pf->hw.port_info); in ice_probe()
4857 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { in ice_probe()
4864 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_probe()
4870 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_probe()
4887 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_probe()
4893 /* Note: Flow director init failure is non-fatal to load */ in ice_probe()
4897 /* Note: DCB init failure is non-fatal to load */ in ice_probe()
4899 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_probe()
4900 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); in ice_probe()
4902 ice_cfg_lldp_mib_change(&pf->hw, true); in ice_probe()
4909 pcie_print_link_status(pf->pdev); in ice_probe()
4921 clear_bit(ICE_DOWN, pf->state); in ice_probe()
4923 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); in ice_probe()
4924 if (pf->aux_idx < 0) { in ice_probe()
4926 err = -ENOMEM; in ice_probe()
4933 err = -EIO; in ice_probe()
4944 pf->adev = NULL; in ice_probe()
4945 ida_free(&ice_aux_ida, pf->aux_idx); in ice_probe()
4952 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
4953 set_bit(ICE_DOWN, pf->state); in ice_probe()
4954 devm_kfree(dev, pf->first_sw); in ice_probe()
4960 devm_kfree(dev, pf->vsi); in ice_probe()
4972 * ice_set_wake - enable or disable Wake on LAN
4979 struct ice_hw *hw = &pf->hw; in ice_set_wake()
4980 bool wol = pf->wol_ena; in ice_set_wake()
4993 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5003 struct ice_hw *hw = &pf->hw; in ice_setup_mc_magic_wake()
5009 if (!pf->wol_ena) in ice_setup_mc_magic_wake()
5017 if (vsi->netdev) in ice_setup_mc_magic_wake()
5018 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); in ice_setup_mc_magic_wake()
5020 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_setup_mc_magic_wake()
5029 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_setup_mc_magic_wake()
5033 * ice_remove - Device removal routine
5043 if (!ice_is_reset_in_progress(pf->state)) in ice_remove()
5050 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { in ice_remove()
5051 set_bit(ICE_VF_RESETS_DISABLED, pf->state); in ice_remove()
5059 if (pf->aux_idx >= 0) in ice_remove()
5060 ida_free(&ice_aux_ida, pf->aux_idx); in ice_remove()
5062 set_bit(ICE_DOWN, pf->state); in ice_remove()
5065 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_remove()
5073 mutex_destroy(&pf->hw.fdir_fltr_lock); in ice_remove()
5077 if (!pf->vsi[i]) in ice_remove()
5079 ice_vsi_free_q_vectors(pf->vsi[i]); in ice_remove()
5083 ice_deinit_hw(&pf->hw); in ice_remove()
5089 ice_reset(&pf->hw, ICE_RESET_PFR); in ice_remove()
5097 * ice_shutdown - PCI callback for shutting down device
5107 pci_wake_from_d3(pdev, pf->wol_ena); in ice_shutdown()
5114 * ice_prepare_for_shutdown - prep for PCI shutdown
5121 struct ice_hw *hw = &pf->hw; in ice_prepare_for_shutdown()
5125 if (ice_check_sq_alive(hw, &hw->mailboxq)) in ice_prepare_for_shutdown()
5134 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5135 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5141 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5147 * This should be called during the resume routine to re-allocate the q_vectors
5161 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); in ice_reinit_interrupt_scheme()
5165 /* Remap vectors and rings after successfully re-initializing interrupts */ in ice_reinit_interrupt_scheme()
5167 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5170 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5173 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5186 while (v--) in ice_reinit_interrupt_scheme()
5187 if (pf->vsi[v]) in ice_reinit_interrupt_scheme()
5188 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5210 return -EBUSY; in ice_suspend()
5224 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { in ice_suspend()
5230 if (test_bit(ICE_DOWN, pf->state) || in ice_suspend()
5231 ice_is_reset_in_progress(pf->state)) { in ice_suspend()
5251 if (!pf->vsi[v]) in ice_suspend()
5253 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5258 pci_wake_from_d3(pdev, pf->wol_ena); in ice_suspend()
5264 * ice_resume - PM callback for waking up from D3
5280 return -ENODEV; in ice_resume()
5289 hw = &pf->hw; in ice_resume()
5291 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_resume()
5301 clear_bit(ICE_DOWN, pf->state); in ice_resume()
5304 /* re-enable service task for reset, but allow reset to schedule it */ in ice_resume()
5305 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_resume()
5310 clear_bit(ICE_SUSPENDED, pf->state); in ice_resume()
5314 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_resume()
5321 * ice_pci_err_detected - warning that PCI error has been detected
5334 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", in ice_pci_err_detected()
5339 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_detected()
5342 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_detected()
5343 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_detected()
5352 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5367 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", in ice_pci_err_slot_reset()
5377 reg = rd32(&pf->hw, GLGEN_RTRIG); in ice_pci_err_slot_reset()
5388 * ice_pci_err_resume - restart operations after PCI error recovery
5399 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", in ice_pci_err_resume()
5404 if (test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_resume()
5405 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", in ice_pci_err_resume()
5414 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_pci_err_resume()
5418 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5425 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_reset_prepare()
5428 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_reset_prepare()
5429 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_reset_prepare()
5436 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5444 /* ice_pci_tbl - PCI Device ID Table
5508 * ice_module_init - Driver registration routine
5523 return -ENOMEM; in ice_module_init()
5537 * ice_module_exit - Driver exit cleanup routine
5551 * ice_set_mac_address - NDO callback to set MAC address
5560 struct ice_vsi *vsi = np->vsi; in ice_set_mac_address()
5561 struct ice_pf *pf = vsi->back; in ice_set_mac_address()
5562 struct ice_hw *hw = &pf->hw; in ice_set_mac_address()
5569 mac = (u8 *)addr->sa_data; in ice_set_mac_address()
5572 return -EADDRNOTAVAIL; in ice_set_mac_address()
5574 if (ether_addr_equal(netdev->dev_addr, mac)) { in ice_set_mac_address()
5579 if (test_bit(ICE_DOWN, pf->state) || in ice_set_mac_address()
5580 ice_is_reset_in_progress(pf->state)) { in ice_set_mac_address()
5583 return -EBUSY; in ice_set_mac_address()
5587 …netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try ag… in ice_set_mac_address()
5589 return -EAGAIN; in ice_set_mac_address()
5593 ether_addr_copy(old_mac, netdev->dev_addr); in ice_set_mac_address()
5600 if (err && err != -ENOENT) { in ice_set_mac_address()
5601 err = -EADDRNOTAVAIL; in ice_set_mac_address()
5607 if (err == -EEXIST) { in ice_set_mac_address()
5618 err = -EADDRNOTAVAIL; in ice_set_mac_address()
5631 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", in ice_set_mac_address()
5632 netdev->dev_addr); in ice_set_mac_address()
5645 * ice_set_rx_mode - NDO callback to set the netdev filters
5651 struct ice_vsi *vsi = np->vsi; in ice_set_rx_mode()
5660 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
5661 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
5662 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); in ice_set_rx_mode()
5667 ice_service_task_schedule(vsi->back); in ice_set_rx_mode()
5671 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5680 struct ice_vsi *vsi = np->vsi; in ice_set_tx_maxrate()
5689 return -EINVAL; in ice_set_tx_maxrate()
5692 q_handle = vsi->tx_rings[queue_index]->q_handle; in ice_set_tx_maxrate()
5697 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
5700 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
5710 * ice_fdb_add - add an entry to the hardware database
5728 return -EINVAL; in ice_fdb_add()
5730 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { in ice_fdb_add()
5732 return -EINVAL; in ice_fdb_add()
5740 err = -EINVAL; in ice_fdb_add()
5743 if (err == -EEXIST && !(flags & NLM_F_EXCL)) in ice_fdb_add()
5750 * ice_fdb_del - delete an entry from the hardware database
5765 if (ndm->ndm_state & NUD_PERMANENT) { in ice_fdb_del()
5767 return -EINVAL; in ice_fdb_del()
5775 err = -EINVAL; in ice_fdb_del()
5792 * ice_fix_features - fix the netdev features flags based on device limitations
5835 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; in ice_fix_features()
5844 if (ice_is_dvm_ena(&np->vsi->back->hw)) { in ice_fix_features()
5874 if (!(netdev->features & NETIF_F_RXFCS) && in ice_fix_features()
5877 !ice_vsi_has_non_zero_vlans(np->vsi)) { in ice_fix_features()
5886 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
5915 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
5917 strip_err = vlan_ops->dis_stripping(vsi); in ice_set_vlan_offload_features()
5920 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
5922 insert_err = vlan_ops->dis_insertion(vsi); in ice_set_vlan_offload_features()
5925 return -EIO; in ice_set_vlan_offload_features()
5931 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
5949 err = vlan_ops->ena_rx_filtering(vsi); in ice_set_vlan_filtering_features()
5951 err = vlan_ops->dis_rx_filtering(vsi); in ice_set_vlan_filtering_features()
5957 * ice_set_vlan_features - set VLAN settings based on suggested feature set
5969 struct ice_vsi *vsi = np->vsi; in ice_set_vlan_features()
5972 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; in ice_set_vlan_features()
5977 dev_err(ice_pf_to_dev(vsi->back), in ice_set_vlan_features()
5979 return -EIO; in ice_set_vlan_features()
5987 current_vlan_features = netdev->features & in ice_set_vlan_features()
6000 * ice_set_loopback - turn on/off loopback mode on underlying PF
6006 bool if_running = netif_running(vsi->netdev); in ice_set_loopback()
6009 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { in ice_set_loopback()
6012 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); in ice_set_loopback()
6016 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in ice_set_loopback()
6018 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in ice_set_loopback()
6026 * ice_set_features - set the netdev feature flags
6033 netdev_features_t changed = netdev->features ^ features; in ice_set_features()
6035 struct ice_vsi *vsi = np->vsi; in ice_set_features()
6036 struct ice_pf *pf = vsi->back; in ice_set_features()
6042 "Device is in Safe Mode - not enabling advanced netdev features\n"); in ice_set_features()
6047 if (ice_is_reset_in_progress(pf->state)) { in ice_set_features()
6050 return -EBUSY; in ice_set_features()
6069 dev_err(ice_pf_to_dev(vsi->back), in ice_set_features()
6071 return -EIO; in ice_set_features()
6090 return -EACCES; in ice_set_features()
6096 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : in ice_set_features()
6097 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_set_features()
6107 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6114 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6118 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6126 * ice_vsi_cfg - Setup the VSI
6135 if (vsi->netdev) { in ice_vsi_cfg()
6136 ice_set_rx_mode(vsi->netdev); in ice_vsi_cfg()
6138 if (vsi->type != ICE_VSI_LB) { in ice_vsi_cfg()
6160 * which is hard-coded to a limit of 250,000 ints/second.
6162 * by ethtool rx-usecs-high.
6201 rc = (struct ice_ring_container *)dim->priv; in ice_tx_dim_work()
6203 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); in ice_tx_dim_work()
6206 itr = tx_profile[dim->profile_ix].itr; in ice_tx_dim_work()
6211 dim->state = DIM_START_MEASURE; in ice_tx_dim_work()
6221 rc = (struct ice_ring_container *)dim->priv; in ice_rx_dim_work()
6223 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); in ice_rx_dim_work()
6226 itr = rx_profile[dim->profile_ix].itr; in ice_rx_dim_work()
6231 dim->state = DIM_START_MEASURE; in ice_rx_dim_work()
6237 * ice_init_moderation - set up interrupt moderation
6251 rc = &q_vector->tx; in ice_init_moderation()
6252 INIT_WORK(&rc->dim.work, ice_tx_dim_work); in ice_init_moderation()
6253 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ice_init_moderation()
6254 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; in ice_init_moderation()
6255 rc->dim.priv = rc; in ice_init_moderation()
6260 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); in ice_init_moderation()
6262 rc = &q_vector->rx; in ice_init_moderation()
6263 INIT_WORK(&rc->dim.work, ice_rx_dim_work); in ice_init_moderation()
6264 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ice_init_moderation()
6265 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; in ice_init_moderation()
6266 rc->dim.priv = rc; in ice_init_moderation()
6270 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr : in ice_init_moderation()
6271 rc->itr_setting); in ice_init_moderation()
6277 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6284 if (!vsi->netdev) in ice_napi_enable_all()
6288 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_enable_all()
6292 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) in ice_napi_enable_all()
6293 napi_enable(&q_vector->napi); in ice_napi_enable_all()
6298 * ice_up_complete - Finish the last steps of bringing up a connection
6305 struct ice_pf *pf = vsi->back; in ice_up_complete()
6318 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_up_complete()
6322 if (vsi->port_info && in ice_up_complete()
6323 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && in ice_up_complete()
6324 vsi->netdev) { in ice_up_complete()
6326 netif_tx_start_all_queues(vsi->netdev); in ice_up_complete()
6327 netif_carrier_on(vsi->netdev); in ice_up_complete()
6328 if (!ice_is_e810(&pf->hw)) in ice_up_complete()
6329 ice_ptp_link_change(pf, pf->hw.pf_id, true); in ice_up_complete()
6342 * ice_up - Bring the connection back up after being down
6357 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
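The helper's body is elided from this listing; it is the standard u64_stats retry-read loop, re-reading the counters until the writer-side sequence count is stable. A hedged reconstruction of the idiom (the _irq-suffixed helpers are the ones kernels of this era provided; later kernels renamed them):

	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*pkts = stats.pkts;
		*bytes = stats.bytes;
	} while (u64_stats_fetch_retry_irq(syncp, start));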
6380 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6400 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); in ice_update_vsi_tx_ring_stats()
6401 vsi_stats->tx_packets += pkts; in ice_update_vsi_tx_ring_stats()
6402 vsi_stats->tx_bytes += bytes; in ice_update_vsi_tx_ring_stats()
6403 vsi->tx_restart += ring->tx_stats.restart_q; in ice_update_vsi_tx_ring_stats()
6404 vsi->tx_busy += ring->tx_stats.tx_busy; in ice_update_vsi_tx_ring_stats()
6405 vsi->tx_linearize += ring->tx_stats.tx_linearize; in ice_update_vsi_tx_ring_stats()
6410 * ice_update_vsi_ring_stats - Update VSI stats counters
6423 /* reset non-netdev (extended) stats */ in ice_update_vsi_ring_stats()
6424 vsi->tx_restart = 0; in ice_update_vsi_ring_stats()
6425 vsi->tx_busy = 0; in ice_update_vsi_ring_stats()
6426 vsi->tx_linearize = 0; in ice_update_vsi_ring_stats()
6427 vsi->rx_buf_failed = 0; in ice_update_vsi_ring_stats()
6428 vsi->rx_page_failed = 0; in ice_update_vsi_ring_stats()
6433 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, in ice_update_vsi_ring_stats()
6434 vsi->num_txq); in ice_update_vsi_ring_stats()
6438 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); in ice_update_vsi_ring_stats()
6440 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); in ice_update_vsi_ring_stats()
6441 vsi_stats->rx_packets += pkts; in ice_update_vsi_ring_stats()
6442 vsi_stats->rx_bytes += bytes; in ice_update_vsi_ring_stats()
6443 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; in ice_update_vsi_ring_stats()
6444 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; in ice_update_vsi_ring_stats()
6449 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, in ice_update_vsi_ring_stats()
6450 vsi->num_xdp_txq); in ice_update_vsi_ring_stats()
6454 vsi->net_stats.tx_packets = vsi_stats->tx_packets; in ice_update_vsi_ring_stats()
6455 vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; in ice_update_vsi_ring_stats()
6456 vsi->net_stats.rx_packets = vsi_stats->rx_packets; in ice_update_vsi_ring_stats()
6457 vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; in ice_update_vsi_ring_stats()
6463 * ice_update_vsi_stats - Update VSI stats counters
6468 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; in ice_update_vsi_stats()
6469 struct ice_eth_stats *cur_es = &vsi->eth_stats; in ice_update_vsi_stats()
6470 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats()
6472 if (test_bit(ICE_VSI_DOWN, vsi->state) || in ice_update_vsi_stats()
6473 test_bit(ICE_CFG_BUSY, pf->state)) in ice_update_vsi_stats()
6482 cur_ns->tx_errors = cur_es->tx_errors; in ice_update_vsi_stats()
6483 cur_ns->rx_dropped = cur_es->rx_discards; in ice_update_vsi_stats()
6484 cur_ns->tx_dropped = cur_es->tx_discards; in ice_update_vsi_stats()
6485 cur_ns->multicast = cur_es->rx_multicast; in ice_update_vsi_stats()
6488 if (vsi->type == ICE_VSI_PF) { in ice_update_vsi_stats()
6489 cur_ns->rx_crc_errors = pf->stats.crc_errors; in ice_update_vsi_stats()
6490 cur_ns->rx_errors = pf->stats.crc_errors + in ice_update_vsi_stats()
6491 pf->stats.illegal_bytes + in ice_update_vsi_stats()
6492 pf->stats.rx_len_errors + in ice_update_vsi_stats()
6493 pf->stats.rx_undersize + in ice_update_vsi_stats()
6494 pf->hw_csum_rx_error + in ice_update_vsi_stats()
6495 pf->stats.rx_jabber + in ice_update_vsi_stats()
6496 pf->stats.rx_fragments + in ice_update_vsi_stats()
6497 pf->stats.rx_oversize; in ice_update_vsi_stats()
6498 cur_ns->rx_length_errors = pf->stats.rx_len_errors; in ice_update_vsi_stats()
6500 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; in ice_update_vsi_stats()
6505 * ice_update_pf_stats - Update PF port stats counters
6511 struct ice_hw *hw = &pf->hw; in ice_update_pf_stats()
6515 port = hw->port_info->lport; in ice_update_pf_stats()
6516 prev_ps = &pf->stats_prev; in ice_update_pf_stats()
6517 cur_ps = &pf->stats; in ice_update_pf_stats()
6519 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6520 &prev_ps->eth.rx_bytes, in ice_update_pf_stats()
6521 &cur_ps->eth.rx_bytes); in ice_update_pf_stats()
6523 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6524 &prev_ps->eth.rx_unicast, in ice_update_pf_stats()
6525 &cur_ps->eth.rx_unicast); in ice_update_pf_stats()
6527 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6528 &prev_ps->eth.rx_multicast, in ice_update_pf_stats()
6529 &cur_ps->eth.rx_multicast); in ice_update_pf_stats()
6531 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6532 &prev_ps->eth.rx_broadcast, in ice_update_pf_stats()
6533 &cur_ps->eth.rx_broadcast); in ice_update_pf_stats()
6535 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, in ice_update_pf_stats()
6536 &prev_ps->eth.rx_discards, in ice_update_pf_stats()
6537 &cur_ps->eth.rx_discards); in ice_update_pf_stats()
6539 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6540 &prev_ps->eth.tx_bytes, in ice_update_pf_stats()
6541 &cur_ps->eth.tx_bytes); in ice_update_pf_stats()
6543 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6544 &prev_ps->eth.tx_unicast, in ice_update_pf_stats()
6545 &cur_ps->eth.tx_unicast); in ice_update_pf_stats()
6547 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6548 &prev_ps->eth.tx_multicast, in ice_update_pf_stats()
6549 &cur_ps->eth.tx_multicast); in ice_update_pf_stats()
6551 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6552 &prev_ps->eth.tx_broadcast, in ice_update_pf_stats()
6553 &cur_ps->eth.tx_broadcast); in ice_update_pf_stats()
6555 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6556 &prev_ps->tx_dropped_link_down, in ice_update_pf_stats()
6557 &cur_ps->tx_dropped_link_down); in ice_update_pf_stats()
6559 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6560 &prev_ps->rx_size_64, &cur_ps->rx_size_64); in ice_update_pf_stats()
6562 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6563 &prev_ps->rx_size_127, &cur_ps->rx_size_127); in ice_update_pf_stats()
6565 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6566 &prev_ps->rx_size_255, &cur_ps->rx_size_255); in ice_update_pf_stats()
6568 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6569 &prev_ps->rx_size_511, &cur_ps->rx_size_511); in ice_update_pf_stats()
6571 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6572 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); in ice_update_pf_stats()
6574 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6575 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); in ice_update_pf_stats()
6577 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6578 &prev_ps->rx_size_big, &cur_ps->rx_size_big); in ice_update_pf_stats()
6580 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6581 &prev_ps->tx_size_64, &cur_ps->tx_size_64); in ice_update_pf_stats()
6583 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6584 &prev_ps->tx_size_127, &cur_ps->tx_size_127); in ice_update_pf_stats()
6586 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6587 &prev_ps->tx_size_255, &cur_ps->tx_size_255); in ice_update_pf_stats()
6589 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6590 &prev_ps->tx_size_511, &cur_ps->tx_size_511); in ice_update_pf_stats()
6592 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6593 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); in ice_update_pf_stats()
6595 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6596 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); in ice_update_pf_stats()
6598 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6599 &prev_ps->tx_size_big, &cur_ps->tx_size_big); in ice_update_pf_stats()
6601 fd_ctr_base = hw->fd_ctr_base; in ice_update_pf_stats()
6605 pf->stat_prev_loaded, &prev_ps->fd_sb_match, in ice_update_pf_stats()
6606 &cur_ps->fd_sb_match); in ice_update_pf_stats()
6607 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6608 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); in ice_update_pf_stats()
6610 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6611 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); in ice_update_pf_stats()
6613 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6614 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); in ice_update_pf_stats()
6616 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6617 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); in ice_update_pf_stats()
6621 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6622 &prev_ps->crc_errors, &cur_ps->crc_errors); in ice_update_pf_stats()
6624 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6625 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); in ice_update_pf_stats()
6627 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6628 &prev_ps->mac_local_faults, in ice_update_pf_stats()
6629 &cur_ps->mac_local_faults); in ice_update_pf_stats()
6631 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6632 &prev_ps->mac_remote_faults, in ice_update_pf_stats()
6633 &cur_ps->mac_remote_faults); in ice_update_pf_stats()
6635 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6636 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); in ice_update_pf_stats()
6638 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6639 &prev_ps->rx_undersize, &cur_ps->rx_undersize); in ice_update_pf_stats()
6641 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6642 &prev_ps->rx_fragments, &cur_ps->rx_fragments); in ice_update_pf_stats()
6644 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6645 &prev_ps->rx_oversize, &cur_ps->rx_oversize); in ice_update_pf_stats()
6647 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
6648 &prev_ps->rx_jabber, &cur_ps->rx_jabber); in ice_update_pf_stats()
6650 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; in ice_update_pf_stats()
6652 pf->stat_prev_loaded = true; in ice_update_pf_stats()
6656 * ice_get_stats64 - get statistics for network device structure
6665 struct ice_vsi *vsi = np->vsi; in ice_get_stats64()
6667 vsi_stats = &vsi->net_stats; in ice_get_stats64()
6669 if (!vsi->num_txq || !vsi->num_rxq) in ice_get_stats64()
6677 if (!test_bit(ICE_VSI_DOWN, vsi->state)) in ice_get_stats64()
6679 stats->tx_packets = vsi_stats->tx_packets; in ice_get_stats64()
6680 stats->tx_bytes = vsi_stats->tx_bytes; in ice_get_stats64()
6681 stats->rx_packets = vsi_stats->rx_packets; in ice_get_stats64()
6682 stats->rx_bytes = vsi_stats->rx_bytes; in ice_get_stats64()
6688 stats->multicast = vsi_stats->multicast; in ice_get_stats64()
6689 stats->tx_errors = vsi_stats->tx_errors; in ice_get_stats64()
6690 stats->tx_dropped = vsi_stats->tx_dropped; in ice_get_stats64()
6691 stats->rx_errors = vsi_stats->rx_errors; in ice_get_stats64()
6692 stats->rx_dropped = vsi_stats->rx_dropped; in ice_get_stats64()
6693 stats->rx_crc_errors = vsi_stats->rx_crc_errors; in ice_get_stats64()
6694 stats->rx_length_errors = vsi_stats->rx_length_errors; in ice_get_stats64()
6698 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6705 if (!vsi->netdev) in ice_napi_disable_all()
6709 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_disable_all()
6711 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) in ice_napi_disable_all()
6712 napi_disable(&q_vector->napi); in ice_napi_disable_all()
6714 cancel_work_sync(&q_vector->tx.dim.work); in ice_napi_disable_all()
6715 cancel_work_sync(&q_vector->rx.dim.work); in ice_napi_disable_all()
6720 * ice_down - Shutdown the connection
6723 * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
6729 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); in ice_down()
6731 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_down()
6733 if (!ice_is_e810(&vsi->back->hw)) in ice_down()
6734 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); in ice_down()
6735 netif_carrier_off(vsi->netdev); in ice_down()
6736 netif_tx_disable(vsi->netdev); in ice_down()
6737 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { in ice_down()
6738 ice_eswitch_stop_all_tx_queues(vsi->back); in ice_down()
6745 netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n", in ice_down()
6746 vsi->vsi_num, tx_err); in ice_down()
6750 netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n", in ice_down()
6751 vsi->vsi_num, tx_err); in ice_down()
6756 netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n", in ice_down()
6757 vsi->vsi_num, rx_err); in ice_down()
6762 ice_clean_tx_ring(vsi->tx_rings[i]); in ice_down()
6765 ice_clean_rx_ring(vsi->rx_rings[i]); in ice_down()
6768 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", in ice_down()
6769 vsi->vsi_num, vsi->vsw->sw_id); in ice_down()
6770 return -EIO; in ice_down()
6777 * ice_down_up - shutdown the VSI connection and bring it up
6785 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_down_up()
6794 …netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to … in ice_down_up()
6802 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6811 if (!vsi->num_txq) { in ice_vsi_setup_tx_rings()
6812 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", in ice_vsi_setup_tx_rings()
6813 vsi->vsi_num); in ice_vsi_setup_tx_rings()
6814 return -EINVAL; in ice_vsi_setup_tx_rings()
6818 struct ice_tx_ring *ring = vsi->tx_rings[i]; in ice_vsi_setup_tx_rings()
6821 return -EINVAL; in ice_vsi_setup_tx_rings()
6823 if (vsi->netdev) in ice_vsi_setup_tx_rings()
6824 ring->netdev = vsi->netdev; in ice_vsi_setup_tx_rings()
6834 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6843 if (!vsi->num_rxq) { in ice_vsi_setup_rx_rings()
6844 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", in ice_vsi_setup_rx_rings()
6845 vsi->vsi_num); in ice_vsi_setup_rx_rings()
6846 return -EINVAL; in ice_vsi_setup_rx_rings()
6850 struct ice_rx_ring *ring = vsi->rx_rings[i]; in ice_vsi_setup_rx_rings()
6853 return -EINVAL; in ice_vsi_setup_rx_rings()
6855 if (vsi->netdev) in ice_vsi_setup_rx_rings()
6856 ring->netdev = vsi->netdev; in ice_vsi_setup_rx_rings()
6866 * ice_vsi_open_ctrl - open control VSI for use
6876 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl()
6894 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", in ice_vsi_open_ctrl()
6906 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_open_ctrl()
6922 * ice_vsi_open - Called when a network interface is made active
6932 struct ice_pf *pf = vsi->back; in ice_vsi_open()
6948 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", in ice_vsi_open()
6949 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
6954 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_vsi_open()
6956 if (vsi->type == ICE_VSI_PF) { in ice_vsi_open()
6958 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); in ice_vsi_open()
6962 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); in ice_vsi_open()
6986 * ice_vsi_release_all - Delete all VSIs
6993 if (!pf->vsi) in ice_vsi_release_all()
6997 if (!pf->vsi[i]) in ice_vsi_release_all()
7000 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7003 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7005 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", in ice_vsi_release_all()
7006 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7011 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7015 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7023 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type()
7025 if (!vsi || vsi->type != type) in ice_vsi_rebuild_by_type()
7032 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7037 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7040 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7044 /* Re-map HW VSI number, using VSI handle that has been in ice_vsi_rebuild_by_type()
7047 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7053 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7057 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, in ice_vsi_rebuild_by_type()
7065 * ice_update_pf_netdev_link - Update PF netdev link status
7074 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link()
7076 if (!vsi || vsi->type != ICE_VSI_PF) in ice_update_pf_netdev_link()
7079 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7081 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7082 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7084 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7085 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7091 * ice_rebuild - rebuild after reset
7103 struct ice_hw *hw = &pf->hw; in ice_rebuild()
7107 if (test_bit(ICE_DOWN, pf->state)) in ice_rebuild()
7118 pf->fw_emp_reset_disabled = false; in ice_rebuild()
7167 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_rebuild()
7171 err = ice_sched_init_port(hw->port_info); in ice_rebuild()
7182 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7187 guar = hw->func_caps.fd_fltr_guar; in ice_rebuild()
7188 b_effort = hw->func_caps.fd_fltr_best_effort; in ice_rebuild()
7197 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_rebuild()
7204 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7218 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7237 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7245 if (hw->fdir_prof) in ice_rebuild()
7267 clear_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7277 set_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7280 set_bit(ICE_NEEDS_RESTART, pf->state); in ice_rebuild()
7285 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
7290 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) in ice_max_xdp_frame_size()
7291 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; in ice_max_xdp_frame_size()
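The branch shown covers 8K+ pages and legacy-rx; the elided else branch returns ICE_RXBUF_3072 - XDP_PACKET_HEADROOM (an assumption based on this driver era). With XDP_PACKET_HEADROOM = 256 from the uapi bpf header, the two paths work out as below:

#include <stdio.h>

#define XDP_PACKET_HEADROOM 256	/* value from the uapi bpf header */

int main(void)
{
	printf("legacy-rx / 8K-page path: %d\n", 2048 - XDP_PACKET_HEADROOM); /* 1792 */
	printf("default path:            %d\n", 3072 - XDP_PACKET_HEADROOM); /* 2816 */
	return 0;
}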
7297 * ice_change_mtu - NDO callback to change the MTU
7306 struct ice_vsi *vsi = np->vsi; in ice_change_mtu()
7307 struct ice_pf *pf = vsi->back; in ice_change_mtu()
7311 if (new_mtu == (int)netdev->mtu) { in ice_change_mtu()
7312 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); in ice_change_mtu()
7321 frame_size - ICE_ETH_PKT_HDR_PAD); in ice_change_mtu()
7322 return -EINVAL; in ice_change_mtu()
7328 if (ice_is_reset_in_progress(pf->state)) { in ice_change_mtu()
7339 return -EBUSY; in ice_change_mtu()
7342 netdev->mtu = (unsigned int)new_mtu; in ice_change_mtu()
7345 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { in ice_change_mtu()
7360 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); in ice_change_mtu()
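The upper MTU bound used in the check above comes straight from the hardware's maximum frame size minus the driver's fixed header pad. A sketch of the arithmetic, with constants assumed from this era's headers:

#include <stdio.h>

int main(void)
{
	int eth_hlen = 14, eth_fcs_len = 4, vlan_hlen = 4;
	/* ICE_ETH_PKT_HDR_PAD = ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26 */
	int hdr_pad = eth_hlen + eth_fcs_len + 2 * vlan_hlen;
	int max_frame = 9728;	/* assumed ICE_AQ_SET_MAC_FRAME_SIZE_MAX */

	printf("ICE_MAX_MTU = %d\n", max_frame - hdr_pad);	/* 9702 */
	return 0;
}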
7366 * ice_eth_ioctl - Access the hwtstamp interface
7374 struct ice_pf *pf = np->vsi->back; in ice_eth_ioctl()
7382 return -EOPNOTSUPP; in ice_eth_ioctl()
7387 * ice_aq_str - convert AQ err code to a string
* ice_set_rss_lut - Set RSS LUT

struct ice_hw *hw = &vsi->back->hw;

return -EINVAL;

params.vsi_handle = vsi->idx;

params.lut_type = vsi->rss_lut_type;

dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
	status, ice_aq_str(hw->adminq.sq_last_status));

* ice_set_rss_key - Set RSS key

struct ice_hw *hw = &vsi->back->hw;

return -EINVAL;

status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);

dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
	status, ice_aq_str(hw->adminq.sq_last_status));

* ice_get_rss_lut - Get RSS LUT

struct ice_hw *hw = &vsi->back->hw;

return -EINVAL;

params.vsi_handle = vsi->idx;

params.lut_type = vsi->rss_lut_type;

dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
	status, ice_aq_str(hw->adminq.sq_last_status));

* ice_get_rss_key - Get RSS key

struct ice_hw *hw = &vsi->back->hw;

return -EINVAL;

status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);

dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
	status, ice_aq_str(hw->adminq.sq_last_status));
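/* Minimal usage sketch (hypothetical helper, not part of the driver):
 * program an even-spread LUT and a random key through the wrappers
 * above, much like the ethtool -X path does. Assumes vsi->rss_table_size
 * and vsi->rss_size are already set and that ICE_VSIQF_HKEY_ARRAY_SIZE
 * is the key size the admin queue expects.
 */
static int ice_example_seed_rss(struct ice_vsi *vsi)
{
	u8 key[ICE_VSIQF_HKEY_ARRAY_SIZE];
	u8 *lut;
	int err, i;

	lut = kcalloc(vsi->rss_table_size, sizeof(*lut), GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* spread Rx queues round-robin across the indirection table */
	for (i = 0; i < vsi->rss_table_size; i++)
		lut[i] = i % vsi->rss_size;

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
	if (err)
		return err;

	netdev_rss_key_fill(key, sizeof(key));
	return ice_set_rss_key(vsi, key);
}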
* ice_bridge_getlink - Get the hardware bridge mode

struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;

bmode = pf->first_sw->bridge_mode;

* ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)

struct ice_hw *hw = &vsi->back->hw;

vsi_props = &vsi->info;

return -ENOMEM;

ctxt->info = vsi->info;

ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);

dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
	bmode, ret, ice_aq_str(hw->adminq.sq_last_status));

vsi_props->sw_flags = ctxt->info.sw_flags;
* ice_bridge_setlink - Set the hardware bridge mode

struct ice_pf *pf = np->vsi->back;

struct ice_hw *hw = &pf->hw;

pf_sw = pf->first_sw;

return -EINVAL;

if (mode == pf_sw->bridge_mode)

if (!pf->vsi[v])

err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);

hw->evb_veb = (mode == BRIDGE_MODE_VEB);

	ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);

pf_sw->bridge_mode = mode;
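/* Userspace view (illustrative): the VEB/VEPA mode plumbed through the
 * two NDOs above is normally driven with iproute2, e.g.
 *
 *   # bridge link set dev <pf-netdev> hwmode vepa
 *   # bridge link show dev <pf-netdev>
 *
 * setlink flips hw->evb_veb and updates every PF VSI; getlink reports
 * pf->first_sw->bridge_mode back to the kernel.
 */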
* ice_tx_timeout - Respond to a Tx Hang

struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;

pf->tx_timeout_count++;

/* Check if PFC is enabled for the TC to which the queue belongs
 * to. If yes then Tx timeout is not caused by a hung queue, no
 * need to reset and rebuild
 */

dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",

if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
	if (txqueue == vsi->tx_rings[i]->q_index) {
		tx_ring = vsi->tx_rings[i];

/* Reset recovery level if enough time has elapsed after last timeout.
 * Also ensure no new reset action happens before next timeout period.
 */
if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
	pf->tx_timeout_recovery_level = 1;
else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
			       netdev->watchdog_timeo)))

struct ice_hw *hw = &pf->hw;

head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &

val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

	vsi->vsi_num, txqueue, tx_ring->next_to_clean,
	head, tx_ring->next_to_use, val);

pf->tx_timeout_last_recovery = jiffies;

	pf->tx_timeout_recovery_level, txqueue);

switch (pf->tx_timeout_recovery_level) {
case 1:
	set_bit(ICE_PFR_REQ, pf->state);
	break;
case 2:
	set_bit(ICE_CORER_REQ, pf->state);
	break;
case 3:
	set_bit(ICE_GLOBR_REQ, pf->state);
	break;
default:
	set_bit(ICE_DOWN, pf->state);
	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
	set_bit(ICE_SERVICE_DIS, pf->state);
	break;
}

pf->tx_timeout_recovery_level++;
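/* Escalation note: each successive watchdog hit widens the reset scope -
 * level 1 requests a PF reset (ICE_PFR_REQ), level 2 a core reset
 * (ICE_CORER_REQ), level 3 a global reset (ICE_GLOBR_REQ); beyond that
 * the port is declared unrecoverable and left down with the service
 * task disabled.
 */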
* ice_setup_tc_cls_flower - flower classifier offloads

struct ice_vsi *vsi = np->vsi;

if (cls_flower->common.chain_index)
	return -EOPNOTSUPP;

switch (cls_flower->command) {

return -EINVAL;

* ice_setup_tc_block_cb - callback handler registered for TC block

return ice_setup_tc_cls_flower(np, np->vsi->netdev,

return -EOPNOTSUPP;
* ice_validate_mqprio_qopt - Validate TCF input parameters

struct ice_pf *pf = vsi->back;

if (vsi->type != ICE_VSI_PF)
	return -EINVAL;

if (mqprio_qopt->qopt.offset[0] != 0 ||
    mqprio_qopt->qopt.num_tc < 1 ||
    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
	return -EINVAL;

vsi->ch_rss_size = 0;
num_tc = mqprio_qopt->qopt.num_tc;

int qcount = mqprio_qopt->qopt.count[i];

return -EINVAL;

return -EINVAL;

return -EINVAL;

return -EINVAL;

max_rate = mqprio_qopt->max_rate[i];

min_rate = mqprio_qopt->min_rate[i];

return -EINVAL;

return -EINVAL;

return -EINVAL;

return -EINVAL;

if (i >= mqprio_qopt->qopt.num_tc - 1)

if (mqprio_qopt->qopt.offset[i + 1] !=
    (mqprio_qopt->qopt.offset[i] + qcount))
	return -EINVAL;

if (vsi->num_rxq <
    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
	return -EINVAL;
if (vsi->num_txq <
    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
	return -EINVAL;

return -EINVAL;

return -EINVAL;

/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
vsi->ch_rss_size = max_rss_q_cnt;
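/* Illustrative ADQ invocation that must pass the checks above
 * (interface name and rates are examples, not driver defaults):
 *
 *   # tc qdisc add dev <pf-netdev> root mqprio num_tc 2 \
 *         map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *         shaper bw_rlimit max_rate 4Gbit 5Gbit
 *
 * Note how 4@0 and 4@4 tile the queue range with no holes, which is
 * exactly what the offset[i + 1] == offset[i] + qcount check enforces.
 */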
* ice_add_vsi_to_fdir - add a VSI to the flow director group for PF

if (!(vsi->num_gfltr || vsi->num_bfltr))
	return -EINVAL;

hw = &pf->hw;

if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
      hw->fdir_prof[flow]->cnt))

prof = hw->fdir_prof[flow];

	prof->vsi_h[0], vsi->idx,
	prio, prof->fdir_seg[tun],

	vsi->idx, flow);

prof->entry_h[prof->cnt][tun] = entry_h;

prof->vsi_h[prof->cnt] = vsi->idx;
prof->cnt++;

dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,

dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
* ice_add_channel - add a channel by adding VSI

if (ch->type != ICE_VSI_CHNL) {
	dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
	return -EINVAL;

vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
if (!vsi || vsi->type != ICE_VSI_CHNL) {

	return -EINVAL;

ch->sw_id = sw_id;
ch->vsi_num = vsi->vsi_num;
ch->info.mapping_flags = vsi->info.mapping_flags;
ch->ch_vsi = vsi;

vsi->ch = ch;

memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
       sizeof(vsi->info.q_mapping));
memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
       sizeof(vsi->info.tc_mapping));
/* in ice_chnl_cfg_res() */
for (i = 0; i < ch->num_txq; i++) {

	tx_ring = vsi->tx_rings[ch->base_q + i];
	rx_ring = vsi->rx_rings[ch->base_q + i];

	tx_ring->ch = ch;
	rx_ring->ch = ch;

	tx_q_vector = tx_ring->q_vector;
	rx_q_vector = rx_ring->q_vector;

	tx_q_vector->ch = ch;

	rc = &tx_q_vector->tx;

	ice_write_itr(rc, rc->itr_setting);

	rx_q_vector->ch = ch;

	rc = &rx_q_vector->rx;

	ice_write_itr(rc, rc->itr_setting);
}

/* it is safe to assume that, if the channel has a non-zero num_txq or
 * num_rxq, then the GLINT_ITR register has been written to perform an
 * in-context update, hence perform a flush
 */
if (ch->num_txq || ch->num_rxq)
	ice_flush(&vsi->back->hw);
* ice_cfg_chnl_all_res - configure channel resources

* This function configures channel specific resources such as flow-director

* ice_setup_hw_channel - setup new channel

ch->base_q = vsi->next_base_q;
ch->type = type;

vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
	ch->num_rxq);
* ice_setup_channel - setup new channel using uplink element

if (vsi->type != ICE_VSI_PF) {
	dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);

sw_id = pf->first_sw->sw_id;

return ch->ch_vsi ? true : false;

* ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* ice_create_q_channel - function to create channel

struct ice_pf *pf = vsi->back;

return -EINVAL;

if (!ch->num_txq || !ch->num_rxq) {
	dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
	return -EINVAL;

if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {

	vsi->cnt_q_avail, ch->num_txq);
	return -EINVAL;

return -EINVAL;

if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {

	ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
			       ch->min_tx_rate);

		ch->max_tx_rate, ch->ch_vsi->vsi_num);

		ch->max_tx_rate, ch->ch_vsi->vsi_num);

vsi->cnt_q_avail -= ch->num_txq;
* ice_rem_all_chnl_fltrs - removes all channel filters
* @pf: ptr to PF, TC-flower based filters are tracked at PF level

* tc-flower based filters

	&pf->tc_flower_fltr_list,

rule.rid = fltr->rid;
rule.rule_id = fltr->rule_id;
rule.vsi_handle = fltr->dest_id;
status = ice_rem_adv_rule_by_id(&pf->hw, &rule);

if (status == -ENOENT)

} else if (fltr->dest_vsi) {

	if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
		u32 flags = fltr->flags;

		fltr->dest_vsi->num_chnl_fltr--;

		pf->num_dmac_chnl_fltrs--;

hlist_del(&fltr->tc_flower_node);
* ice_remove_q_channels - Remove queue channels for the TCs

struct ice_pf *pf = vsi->back;

/* remove all tc-flower based filters if they are channel filters only */

if (vsi->netdev->features & NETIF_F_NTUPLE) {
	struct ice_hw *hw = &pf->hw;

	mutex_lock(&hw->fdir_fltr_lock);

	mutex_unlock(&hw->fdir_fltr_lock);

list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {

	list_del(&ch->list);
	ch_vsi = ch->ch_vsi;

	for (i = 0; i < ch->num_rxq; i++) {

		tx_ring = vsi->tx_rings[ch->base_q + i];
		rx_ring = vsi->rx_rings[ch->base_q + i];

		tx_ring->ch = NULL;
		if (tx_ring->q_vector)
			tx_ring->q_vector->ch = NULL;

		rx_ring->ch = NULL;
		if (rx_ring->q_vector)
			rx_ring->q_vector->ch = NULL;

	ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);

	ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);

	ice_vsi_delete(ch->ch_vsi);

	ice_vsi_clear(ch->ch_vsi);

vsi->tc_map_vsi[i] = NULL;

vsi->all_enatc = 0;
vsi->all_numtc = 0;
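/* Teardown order worth noting: channel filters go first, then the
 * per-ring channel back-pointers, then the flow-director ADQ context,
 * then the scheduler config (ice_rm_vsi_lan_cfg()), and only then is
 * the channel VSI deleted and cleared - so nothing can still reference
 * the VSI once it is gone.
 */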
* ice_rebuild_channels - rebuild channels

if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
    main_vsi->old_numtc == 1)

err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);

	main_vsi->old_ena_tc, main_vsi->vsi_num);

vsi = pf->vsi[i];
if (!vsi || vsi->type != ICE_VSI_CHNL)

type = vsi->type;

	ice_vsi_type_str(type), vsi->idx, err);

/* Re-map HW VSI number, using VSI handle that has been

vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

err = ice_replay_vsi(&pf->hw, vsi->idx);

	ice_vsi_type_str(type), err, vsi->idx);

	ice_vsi_type_str(type), vsi->idx);

main_vsi->tc_map_vsi[tc_idx++] = vsi;

list_for_each_entry(ch, &main_vsi->ch_list, list) {

	ch_vsi = ch->ch_vsi;

	/* replay BW rate limit if it is non-zero */
	if (!ch->max_tx_rate && !ch->min_tx_rate)

	err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
			       ch->min_tx_rate);

		err, ch->max_tx_rate, ch->min_tx_rate,
		ch_vsi->vsi_num);

		ch->max_tx_rate, ch->min_tx_rate,
		ch_vsi->vsi_num);

if (main_vsi->ch_rss_size)
* ice_create_q_channels - Add queue channel for the given TCs

struct ice_pf *pf = vsi->back;

if (!(vsi->all_enatc & BIT(i)))

	ret = -ENOMEM;

INIT_LIST_HEAD(&ch->list);
ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

if (ch->max_tx_rate)
	ch->max_tx_rate = div_u64(ch->max_tx_rate,

if (ch->min_tx_rate)
	ch->min_tx_rate = div_u64(ch->min_tx_rate,

list_add_tail(&ch->list, &vsi->ch_list);
vsi->tc_map_vsi[i] = ch->ch_vsi;

	"successfully created channel: VSI %pK\n", ch->ch_vsi);
* ice_setup_tc_mqprio_qdisc - configure multiple traffic classes

struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;

num_tcf = mqprio_qopt->qopt.num_tc;
hw = mqprio_qopt->qopt.hw;
mode = mqprio_qopt->mode;

clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
vsi->ch_rss_size = 0;
memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));

memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);

if (vsi->netdev->features & NETIF_F_HW_TC)
	set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);

return -EINVAL;

if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&

if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))

if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
	vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),

	vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),

/* logic to rebuild VSI, same as ethtool -L */

offset = vsi->mqprio_qopt.qopt.offset[i];
qcount_rx = vsi->mqprio_qopt.qopt.count[i];
qcount_tx = vsi->mqprio_qopt.qopt.count[i];

vsi->req_txq = offset + qcount_tx;
vsi->req_rxq = offset + qcount_rx;

* from ice_vsi_rebuild during tc-qdisc delete stage - to

vsi->orig_rss_size = vsi->rss_size;

cur_txq = vsi->num_txq;
cur_rxq = vsi->num_rxq;

vsi->req_txq = cur_txq;
vsi->req_rxq = cur_rxq;
clear_bit(ICE_RESET_FAILED, pf->state);

vsi->all_numtc = num_tcf;
vsi->all_enatc = ena_tc_qdisc;

	vsi->vsi_num);

if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
	u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
	u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		max_tx_rate, min_tx_rate, vsi->vsi_num);

		max_tx_rate, min_tx_rate, vsi->vsi_num);

if (vsi->ch_rss_size)

vsi->all_numtc = 0;
vsi->all_enatc = 0;
/* in ice_setup_tc() */
struct ice_pf *pf = np->vsi->back;

mutex_lock(&pf->tc_mutex);

mutex_unlock(&pf->tc_mutex);

return -EOPNOTSUPP;

return -EOPNOTSUPP;
/* in ice_indr_block_priv_lookup() */
list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
	if (!cb_priv->netdev)

	if (cb_priv->netdev == netdev)

/* in ice_indr_setup_block_cb() */
struct ice_netdev_priv *np = priv->np;

return ice_setup_tc_cls_flower(np, priv->netdev,

return -EOPNOTSUPP;
/* in ice_indr_setup_tc_block() */
    vlan_dev_real_dev(netdev) == np->vsi->netdev))
	return -EOPNOTSUPP;

if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
	return -EOPNOTSUPP;

switch (f->command) {

	return -EEXIST;

	return -ENOMEM;

indr_priv->netdev = netdev;
indr_priv->np = np;
list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

list_del(&indr_priv->list);

list_add_tail(&block_cb->driver_list, &ice_block_cb_list);

	return -ENOENT;

block_cb = flow_block_cb_lookup(f->block,

	return -ENOENT;

list_del(&block_cb->driver_list);

return -EOPNOTSUPP;
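/* Illustrative trigger (hedged example; which matches and actions are
 * actually offloadable depends on the firmware/driver feature set): a
 * flower filter on a VLAN upper of the PF netdev reaches this driver
 * through the indirect block registered above, e.g.
 *
 *   # ip link add link <pf-netdev> name <pf-netdev>.100 type vlan id 100
 *   # tc qdisc add dev <pf-netdev>.100 clsact
 *   # tc filter add dev <pf-netdev>.100 ingress protocol ip flower \
 *         dst_ip 192.168.1.10 skip_sw hw_tc 1
 *
 * FLOW_BLOCK_BIND allocates and lists the per-netdev priv;
 * FLOW_BLOCK_UNBIND looks it up and tears it down.
 */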
/* in ice_indr_setup_tc_cb() */
return -EOPNOTSUPP;

* ice_open - Called when a network interface becomes active

struct ice_pf *pf = np->vsi->back;

if (ice_is_reset_in_progress(pf->state)) {

	return -EBUSY;
* ice_open_internal - Called when a network interface becomes active

struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;

if (test_bit(ICE_NEEDS_RESTART, pf->state)) {

	return -EIO;

pi = vsi->port_info;

ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
	clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
	if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {

set_bit(ICE_FLAG_NO_MEDIA, pf->flags);

	vsi->vsi_num, vsi->vsw->sw_id);
* ice_stop - Disables a network interface

* The stop entry point is called when an interface is de-activated by the OS,

* Returns success only - not allowed to fail

struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;

if (ice_is_reset_in_progress(pf->state)) {

	return -EBUSY;

if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {

	netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
		   vsi->vsi_num, link_err);
	return -EIO;
* ice_features_check - Validate encapsulated packet conforms to limits

if (skb->ip_summed != CHECKSUM_PARTIAL)

if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))

if (skb->encapsulation) {

	if (gso && (skb_shinfo(skb)->gso_type &

	len = skb_inner_network_header(skb) -