Matching lines from the ice driver VSI library (drivers/net/ethernet/intel/ice/ice_lib.c)
1 // SPDX-License-Identifier: GPL-2.0
14 * ice_vsi_type_str - maps VSI type enum to string equivalents
38 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
55 ice_flush(&vsi->back->hw); in ice_vsi_ctrl_all_rx_rings()
67 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
75 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_arrays()
79 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_arrays()
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, in ice_vsi_alloc_arrays()
84 sizeof(*vsi->tx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
85 if (!vsi->tx_rings) in ice_vsi_alloc_arrays()
86 return -ENOMEM; in ice_vsi_alloc_arrays()
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
89 sizeof(*vsi->rx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
90 if (!vsi->rx_rings) in ice_vsi_alloc_arrays()
94 * and XDP rings; at this point vsi->num_xdp_txq might not be set, in ice_vsi_alloc_arrays()
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), in ice_vsi_alloc_arrays()
100 sizeof(*vsi->txq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
102 if (!vsi->txq_map) in ice_vsi_alloc_arrays()
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, in ice_vsi_alloc_arrays()
106 sizeof(*vsi->rxq_map), GFP_KERNEL); in ice_vsi_alloc_arrays()
107 if (!vsi->rxq_map) in ice_vsi_alloc_arrays()
111 if (vsi->type == ICE_VSI_LB) in ice_vsi_alloc_arrays()
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, in ice_vsi_alloc_arrays()
116 sizeof(*vsi->q_vectors), GFP_KERNEL); in ice_vsi_alloc_arrays()
117 if (!vsi->q_vectors) in ice_vsi_alloc_arrays()
120 vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); in ice_vsi_alloc_arrays()
121 if (!vsi->af_xdp_zc_qps) in ice_vsi_alloc_arrays()
127 devm_kfree(dev, vsi->q_vectors); in ice_vsi_alloc_arrays()
129 devm_kfree(dev, vsi->rxq_map); in ice_vsi_alloc_arrays()
131 devm_kfree(dev, vsi->txq_map); in ice_vsi_alloc_arrays()
133 devm_kfree(dev, vsi->rx_rings); in ice_vsi_alloc_arrays()
135 devm_kfree(dev, vsi->tx_rings); in ice_vsi_alloc_arrays()
136 return -ENOMEM; in ice_vsi_alloc_arrays()
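The error labels between the allocations above are elided by the search view; a minimal sketch of the goto-unwind chain the function appears to follow (label names assumed), with each failure falling through the labels so everything allocated earlier is freed in reverse order:

	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq,
						 vsi->alloc_rxq), GFP_KERNEL);
	if (!vsi->af_xdp_zc_qps)
		goto err_zc_qps;

	return 0;

err_zc_qps:
	devm_kfree(dev, vsi->q_vectors);
err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rxrings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;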
140 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
145 switch (vsi->type) { in ice_vsi_set_num_desc()
151 * ethtool -G so we should keep those values instead of in ice_vsi_set_num_desc()
154 if (!vsi->num_rx_desc) in ice_vsi_set_num_desc()
155 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; in ice_vsi_set_num_desc()
156 if (!vsi->num_tx_desc) in ice_vsi_set_num_desc()
157 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; in ice_vsi_set_num_desc()
160 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", in ice_vsi_set_num_desc()
161 vsi->type); in ice_vsi_set_num_desc()
167 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
174 enum ice_vsi_type vsi_type = vsi->type; in ice_vsi_set_num_qs()
175 struct ice_pf *pf = vsi->back; in ice_vsi_set_num_qs()
176 struct ice_vf *vf = vsi->vf; in ice_vsi_set_num_qs()
183 if (vsi->req_txq) { in ice_vsi_set_num_qs()
184 vsi->alloc_txq = vsi->req_txq; in ice_vsi_set_num_qs()
185 vsi->num_txq = vsi->req_txq; in ice_vsi_set_num_qs()
187 vsi->alloc_txq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
192 pf->num_lan_tx = vsi->alloc_txq; in ice_vsi_set_num_qs()
195 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_set_num_qs()
196 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
198 if (vsi->req_rxq) { in ice_vsi_set_num_qs()
199 vsi->alloc_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
200 vsi->num_rxq = vsi->req_rxq; in ice_vsi_set_num_qs()
202 vsi->alloc_rxq = min3(pf->num_lan_msix, in ice_vsi_set_num_qs()
208 pf->num_lan_rx = vsi->alloc_rxq; in ice_vsi_set_num_qs()
210 vsi->num_q_vectors = min_t(int, pf->num_lan_msix, in ice_vsi_set_num_qs()
211 max_t(int, vsi->alloc_rxq, in ice_vsi_set_num_qs()
212 vsi->alloc_txq)); in ice_vsi_set_num_qs()
218 vsi->alloc_txq = ice_get_num_vfs(pf); in ice_vsi_set_num_qs()
219 vsi->alloc_rxq = vsi->alloc_txq; in ice_vsi_set_num_qs()
220 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
223 if (vf->num_req_qs) in ice_vsi_set_num_qs()
224 vf->num_vf_qs = vf->num_req_qs; in ice_vsi_set_num_qs()
225 vsi->alloc_txq = vf->num_vf_qs; in ice_vsi_set_num_qs()
226 vsi->alloc_rxq = vf->num_vf_qs; in ice_vsi_set_num_qs()
227 /* pf->vfs.num_msix_per includes (VF miscellaneous vector + in ice_vsi_set_num_qs()
228 * data queue interrupts). Since vsi->num_q_vectors is number in ice_vsi_set_num_qs()
232 vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF; in ice_vsi_set_num_qs()
235 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
236 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
237 vsi->num_q_vectors = 1; in ice_vsi_set_num_qs()
240 vsi->alloc_txq = 0; in ice_vsi_set_num_qs()
241 vsi->alloc_rxq = 0; in ice_vsi_set_num_qs()
244 vsi->alloc_txq = 1; in ice_vsi_set_num_qs()
245 vsi->alloc_rxq = 1; in ice_vsi_set_num_qs()
256 * ice_get_free_slot - get the next free (NULL-entry) location index in array
269 if (curr < (size - 1) && !tmp_array[curr + 1]) { in ice_get_free_slot()
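Only the fast path survives the excerpt; a hedged reconstruction of the whole helper: prefer curr + 1 when that slot is free, otherwise scan from index 0 for the first NULL entry, and return ICE_NO_VSI when the array is full:

static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		/* linear scan for the first free (NULL) slot */
		while ((i < size) && (tmp_array[i]))
			i++;
		next = (i == size) ? ICE_NO_VSI : i;
	}
	return next;
}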
285 * ice_vsi_delete_from_hw - delete a VSI from the switch
290 struct ice_pf *pf = vsi->back; in ice_vsi_delete_from_hw()
299 if (vsi->type == ICE_VSI_VF) in ice_vsi_delete_from_hw()
300 ctxt->vf_num = vsi->vf->vf_id; in ice_vsi_delete_from_hw()
301 ctxt->vsi_num = vsi->vsi_num; in ice_vsi_delete_from_hw()
303 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); in ice_vsi_delete_from_hw()
305 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); in ice_vsi_delete_from_hw()
307 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", in ice_vsi_delete_from_hw()
308 vsi->vsi_num, status); in ice_vsi_delete_from_hw()
314 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
319 struct ice_pf *pf = vsi->back; in ice_vsi_free_arrays()
324 bitmap_free(vsi->af_xdp_zc_qps); in ice_vsi_free_arrays()
325 vsi->af_xdp_zc_qps = NULL; in ice_vsi_free_arrays()
327 devm_kfree(dev, vsi->q_vectors); in ice_vsi_free_arrays()
328 vsi->q_vectors = NULL; in ice_vsi_free_arrays()
329 devm_kfree(dev, vsi->tx_rings); in ice_vsi_free_arrays()
330 vsi->tx_rings = NULL; in ice_vsi_free_arrays()
331 devm_kfree(dev, vsi->rx_rings); in ice_vsi_free_arrays()
332 vsi->rx_rings = NULL; in ice_vsi_free_arrays()
333 devm_kfree(dev, vsi->txq_map); in ice_vsi_free_arrays()
334 vsi->txq_map = NULL; in ice_vsi_free_arrays()
335 devm_kfree(dev, vsi->rxq_map); in ice_vsi_free_arrays()
336 vsi->rxq_map = NULL; in ice_vsi_free_arrays()
340 * ice_vsi_free_stats - Free the ring statistics structures
346 struct ice_pf *pf = vsi->back; in ice_vsi_free_stats()
349 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_free_stats()
351 if (!pf->vsi_stats) in ice_vsi_free_stats()
354 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_free_stats()
359 if (vsi_stat->tx_ring_stats[i]) { in ice_vsi_free_stats()
360 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); in ice_vsi_free_stats()
361 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); in ice_vsi_free_stats()
366 if (vsi_stat->rx_ring_stats[i]) { in ice_vsi_free_stats()
367 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); in ice_vsi_free_stats()
368 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); in ice_vsi_free_stats()
372 kfree(vsi_stat->tx_ring_stats); in ice_vsi_free_stats()
373 kfree(vsi_stat->rx_ring_stats); in ice_vsi_free_stats()
375 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_free_stats()
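The kfree_rcu()/WRITE_ONCE() pairing used throughout this function is the usual idiom for retiring pointers that lockless readers may still hold; a hedged, illustrative reader-side sketch (not a function from this file, field names taken from the lines above):

	struct ice_ring_stats *ring_stats;
	u64 pkts = 0;

	rcu_read_lock();
	ring_stats = READ_ONCE(vsi_stat->tx_ring_stats[i]);
	if (ring_stats)
		pkts = ring_stats->stats.pkts;
	rcu_read_unlock();

kfree_rcu() defers the actual free past an RCU grace period, so a reader that sampled the pointer just before the WRITE_ONCE(..., NULL) is never left dereferencing freed memory.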
379 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
387 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_ring_stats()
390 vsi_stats = pf->vsi_stats[vsi->idx]; in ice_vsi_alloc_ring_stats()
391 tx_ring_stats = vsi_stats->tx_ring_stats; in ice_vsi_alloc_ring_stats()
392 rx_ring_stats = vsi_stats->rx_ring_stats; in ice_vsi_alloc_ring_stats()
399 ring = vsi->tx_rings[i]; in ice_vsi_alloc_ring_stats()
410 ring->ring_stats = ring_stats; in ice_vsi_alloc_ring_stats()
418 ring = vsi->rx_rings[i]; in ice_vsi_alloc_ring_stats()
429 ring->ring_stats = ring_stats; in ice_vsi_alloc_ring_stats()
436 return -ENOMEM; in ice_vsi_alloc_ring_stats()
440 * ice_vsi_free - clean up and deallocate the provided VSI
451 if (!vsi || !vsi->back) in ice_vsi_free()
454 pf = vsi->back; in ice_vsi_free()
457 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { in ice_vsi_free()
458 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); in ice_vsi_free()
462 mutex_lock(&pf->sw_mutex); in ice_vsi_free()
465 pf->vsi[vsi->idx] = NULL; in ice_vsi_free()
466 pf->next_vsi = vsi->idx; in ice_vsi_free()
470 mutex_unlock(&pf->sw_mutex); in ice_vsi_free()
481 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
489 if (!q_vector->tx.tx_ring) in ice_msix_clean_ctrl_vsi()
493 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET); in ice_msix_clean_ctrl_vsi()
494 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring); in ice_msix_clean_ctrl_vsi()
500 * ice_msix_clean_rings - MSIX mode Interrupt Handler
508 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) in ice_msix_clean_rings()
511 q_vector->total_events++; in ice_msix_clean_rings()
513 napi_schedule(&q_vector->napi); in ice_msix_clean_rings()
521 struct ice_pf *pf = q_vector->vsi->back; in ice_eswitch_msix_clean_rings()
525 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) in ice_eswitch_msix_clean_rings()
530 napi_schedule(&vf->repr->q_vector->napi); in ice_eswitch_msix_clean_rings()
537 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
543 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_stat_arrays()
545 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_alloc_stat_arrays()
547 if (!pf->vsi_stats) in ice_vsi_alloc_stat_arrays()
548 return -ENOENT; in ice_vsi_alloc_stat_arrays()
550 if (pf->vsi_stats[vsi->idx]) in ice_vsi_alloc_stat_arrays()
556 return -ENOMEM; in ice_vsi_alloc_stat_arrays()
558 vsi_stat->tx_ring_stats = in ice_vsi_alloc_stat_arrays()
559 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), in ice_vsi_alloc_stat_arrays()
561 if (!vsi_stat->tx_ring_stats) in ice_vsi_alloc_stat_arrays()
564 vsi_stat->rx_ring_stats = in ice_vsi_alloc_stat_arrays()
565 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), in ice_vsi_alloc_stat_arrays()
567 if (!vsi_stat->rx_ring_stats) in ice_vsi_alloc_stat_arrays()
570 pf->vsi_stats[vsi->idx] = vsi_stat; in ice_vsi_alloc_stat_arrays()
575 kfree(vsi_stat->rx_ring_stats); in ice_vsi_alloc_stat_arrays()
577 kfree(vsi_stat->tx_ring_stats); in ice_vsi_alloc_stat_arrays()
579 pf->vsi_stats[vsi->idx] = NULL; in ice_vsi_alloc_stat_arrays()
580 return -ENOMEM; in ice_vsi_alloc_stat_arrays()
584 * ice_vsi_alloc_def - set default values for already allocated VSI
591 if (vsi->type != ICE_VSI_CHNL) { in ice_vsi_alloc_def()
594 return -ENOMEM; in ice_vsi_alloc_def()
597 switch (vsi->type) { in ice_vsi_alloc_def()
600 vsi->irq_handler = ice_eswitch_msix_clean_rings; in ice_vsi_alloc_def()
604 vsi->irq_handler = ice_msix_clean_rings; in ice_vsi_alloc_def()
608 vsi->irq_handler = ice_msix_clean_ctrl_vsi; in ice_vsi_alloc_def()
612 return -EINVAL; in ice_vsi_alloc_def()
614 vsi->num_rxq = ch->num_rxq; in ice_vsi_alloc_def()
615 vsi->num_txq = ch->num_txq; in ice_vsi_alloc_def()
616 vsi->next_base_q = ch->base_q; in ice_vsi_alloc_def()
623 return -EINVAL; in ice_vsi_alloc_def()
630 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
645 mutex_lock(&pf->sw_mutex); in ice_vsi_alloc()
648 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index in ice_vsi_alloc()
651 if (pf->next_vsi == ICE_NO_VSI) { in ice_vsi_alloc()
660 vsi->back = pf; in ice_vsi_alloc()
661 set_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_alloc()
664 vsi->idx = pf->next_vsi; in ice_vsi_alloc()
665 pf->vsi[pf->next_vsi] = vsi; in ice_vsi_alloc()
667 /* prepare pf->next_vsi for next use */ in ice_vsi_alloc()
668 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, in ice_vsi_alloc()
669 pf->next_vsi); in ice_vsi_alloc()
672 mutex_unlock(&pf->sw_mutex); in ice_vsi_alloc()
677 * ice_alloc_fd_res - Allocate FD resource for a VSI
682 * Returns 0 on success, -EPERM on no-op or -EIO on failure
686 struct ice_pf *pf = vsi->back; in ice_alloc_fd_res()
693 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_alloc_fd_res()
694 return -EPERM; in ice_alloc_fd_res()
696 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || in ice_alloc_fd_res()
697 vsi->type == ICE_VSI_CHNL)) in ice_alloc_fd_res()
698 return -EPERM; in ice_alloc_fd_res()
701 g_val = pf->hw.func_caps.fd_fltr_guar; in ice_alloc_fd_res()
703 return -EPERM; in ice_alloc_fd_res()
706 b_val = pf->hw.func_caps.fd_fltr_best_effort; in ice_alloc_fd_res()
708 return -EPERM; in ice_alloc_fd_res()
718 if (vsi->type == ICE_VSI_PF) { in ice_alloc_fd_res()
719 vsi->num_gfltr = g_val; in ice_alloc_fd_res()
723 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_alloc_fd_res()
725 return -EPERM; in ice_alloc_fd_res()
727 vsi->num_gfltr = ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
731 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
732 } else if (vsi->type == ICE_VSI_VF) { in ice_alloc_fd_res()
733 vsi->num_gfltr = 0; in ice_alloc_fd_res()
736 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
743 return -EPERM; in ice_alloc_fd_res()
745 if (!main_vsi->all_numtc) in ice_alloc_fd_res()
746 return -EINVAL; in ice_alloc_fd_res()
749 numtc = main_vsi->all_numtc - ICE_CHNL_START_TC; in ice_alloc_fd_res()
755 return -EPERM; in ice_alloc_fd_res()
757 g_val -= ICE_PF_VSI_GFLTR; in ice_alloc_fd_res()
759 vsi->num_gfltr = g_val / numtc; in ice_alloc_fd_res()
762 vsi->num_bfltr = b_val; in ice_alloc_fd_res()
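A worked example with assumed numbers: if the function capability reports g_val = 128 guaranteed filters, ICE_PF_VSI_GFLTR of those are reserved for the PF main VSI, and numtc = 4 ADQ traffic classes are enabled, then each channel VSI receives (128 - ICE_PF_VSI_GFLTR) / 4 guaranteed filters, while every VSI shares the same best-effort pool b_val.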
769 * ice_vsi_get_qs - Assign queues from PF to VSI
776 struct ice_pf *pf = vsi->back; in ice_vsi_get_qs()
778 .qs_mutex = &pf->avail_q_mutex, in ice_vsi_get_qs()
779 .pf_map = pf->avail_txqs, in ice_vsi_get_qs()
780 .pf_map_size = pf->max_pf_txqs, in ice_vsi_get_qs()
781 .q_count = vsi->alloc_txq, in ice_vsi_get_qs()
783 .vsi_map = vsi->txq_map, in ice_vsi_get_qs()
788 .qs_mutex = &pf->avail_q_mutex, in ice_vsi_get_qs()
789 .pf_map = pf->avail_rxqs, in ice_vsi_get_qs()
790 .pf_map_size = pf->max_pf_rxqs, in ice_vsi_get_qs()
791 .q_count = vsi->alloc_rxq, in ice_vsi_get_qs()
793 .vsi_map = vsi->rxq_map, in ice_vsi_get_qs()
799 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_get_qs()
805 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
810 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode; in ice_vsi_get_qs()
816 * ice_vsi_put_qs - Release queues from VSI to PF
821 struct ice_pf *pf = vsi->back; in ice_vsi_put_qs()
824 mutex_lock(&pf->avail_q_mutex); in ice_vsi_put_qs()
827 clear_bit(vsi->txq_map[i], pf->avail_txqs); in ice_vsi_put_qs()
828 vsi->txq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
832 clear_bit(vsi->rxq_map[i], pf->avail_rxqs); in ice_vsi_put_qs()
833 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX; in ice_vsi_put_qs()
836 mutex_unlock(&pf->avail_q_mutex); in ice_vsi_put_qs()
847 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_is_safe_mode()
858 return test_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_is_rdma_ena()
862 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
870 struct ice_pf *pf = vsi->back; in ice_vsi_clean_rss_flow_fld()
876 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); in ice_vsi_clean_rss_flow_fld()
879 vsi->vsi_num, status); in ice_vsi_clean_rss_flow_fld()
883 * ice_rss_clean - Delete RSS related VSI structures and configuration
888 struct ice_pf *pf = vsi->back; in ice_rss_clean()
893 devm_kfree(dev, vsi->rss_hkey_user); in ice_rss_clean()
894 devm_kfree(dev, vsi->rss_lut_user); in ice_rss_clean()
899 ice_rem_vsi_rss_list(&pf->hw, vsi->idx); in ice_rss_clean()
903 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
909 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_params()
912 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_set_rss_params()
913 vsi->rss_size = 1; in ice_vsi_set_rss_params()
917 cap = &pf->hw.func_caps.common_cap; in ice_vsi_set_rss_params()
918 max_rss_size = BIT(cap->rss_table_entry_width); in ice_vsi_set_rss_params()
919 switch (vsi->type) { in ice_vsi_set_rss_params()
923 vsi->rss_table_size = (u16)cap->rss_table_size; in ice_vsi_set_rss_params()
924 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_set_rss_params()
925 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); in ice_vsi_set_rss_params()
927 vsi->rss_size = min_t(u16, num_online_cpus(), in ice_vsi_set_rss_params()
929 vsi->rss_lut_type = ICE_LUT_PF; in ice_vsi_set_rss_params()
932 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
933 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); in ice_vsi_set_rss_params()
934 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
940 vsi->rss_table_size = ICE_LUT_VSI_SIZE; in ice_vsi_set_rss_params()
941 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; in ice_vsi_set_rss_params()
942 vsi->rss_lut_type = ICE_LUT_VSI; in ice_vsi_set_rss_params()
948 ice_vsi_type_str(vsi->type)); in ice_vsi_set_rss_params()
954 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
964 memset(&ctxt->info, 0, sizeof(ctxt->info)); in ice_set_dflt_vsi_ctx()
966 ctxt->alloc_from_pool = true; in ice_set_dflt_vsi_ctx()
968 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; in ice_set_dflt_vsi_ctx()
970 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; in ice_set_dflt_vsi_ctx()
972 ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & in ice_set_dflt_vsi_ctx()
975 /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which in ice_set_dflt_vsi_ctx()
978 * DVM - leave inner VLAN in packet by default in ice_set_dflt_vsi_ctx()
981 ctxt->info.inner_vlan_flags |= in ice_set_dflt_vsi_ctx()
983 ctxt->info.outer_vlan_flags = in ice_set_dflt_vsi_ctx()
987 ctxt->info.outer_vlan_flags |= in ice_set_dflt_vsi_ctx()
991 ctxt->info.outer_vlan_flags |= in ice_set_dflt_vsi_ctx()
1004 ctxt->info.ingress_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
1005 ctxt->info.egress_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
1007 ctxt->info.outer_up_table = cpu_to_le32(table); in ice_set_dflt_vsi_ctx()
1012 * ice_vsi_setup_q_map - Setup a VSI queue map
1020 u16 qcount_tx = vsi->alloc_txq; in ice_vsi_setup_q_map()
1021 u16 qcount_rx = vsi->alloc_rxq; in ice_vsi_setup_q_map()
1025 if (!vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map()
1027 vsi->tc_cfg.numtc = 1; in ice_vsi_setup_q_map()
1028 vsi->tc_cfg.ena_tc = 1; in ice_vsi_setup_q_map()
1031 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC); in ice_vsi_setup_q_map()
1034 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc; in ice_vsi_setup_q_map()
1038 /* find the (rounded up) power-of-2 of qcount */ in ice_vsi_setup_q_map()
1044 * queues allocated to TC0. Number of queues is a power-of-2. in ice_vsi_setup_q_map()
1053 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map()
1055 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map()
1056 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map()
1057 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map()
1058 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map()
1059 ctxt->info.tc_mapping[i] = 0; in ice_vsi_setup_q_map()
1064 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map()
1065 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc; in ice_vsi_setup_q_map()
1066 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; in ice_vsi_setup_q_map()
1067 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map()
1075 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); in ice_vsi_setup_q_map()
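A hedged sketch of how the per-TC qmap word above is packed (shift/mask macro names as defined in the ice admin queue headers; the elided lines compute pow as the rounded-up power-of-2 exponent of the per-TC queue count):

	pow = (u16)order_base_2(num_rxq_per_tc);
	qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
		ICE_AQ_VSI_TC_Q_OFFSET_M) |
	       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
		ICE_AQ_VSI_TC_Q_NUM_M);

Hardware decodes the queue-count field as an exponent, which is why the comment above insists the per-TC queue count be a power of 2.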
1078 /* if offset is non-zero, means it is calculated correctly based on in ice_vsi_setup_q_map()
1080 * be correct and non-zero because it is based off - VSI's in ice_vsi_setup_q_map()
1089 if (rx_count > vsi->alloc_rxq) { in ice_vsi_setup_q_map()
1090 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1091 rx_count, vsi->alloc_rxq); in ice_vsi_setup_q_map()
1092 return -EINVAL; in ice_vsi_setup_q_map()
1095 if (tx_count > vsi->alloc_txq) { in ice_vsi_setup_q_map()
1096 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1097 tx_count, vsi->alloc_txq); in ice_vsi_setup_q_map()
1098 return -EINVAL; in ice_vsi_setup_q_map()
1101 vsi->num_txq = tx_count; in ice_vsi_setup_q_map()
1102 vsi->num_rxq = rx_count; in ice_vsi_setup_q_map()
1104 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { in ice_vsi_setup_q_map()
1105 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence makin… in ice_vsi_setup_q_map()
1109 vsi->num_txq = vsi->num_rxq; in ice_vsi_setup_q_map()
1113 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); in ice_vsi_setup_q_map()
1118 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); in ice_vsi_setup_q_map()
1119 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); in ice_vsi_setup_q_map()
1125 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1134 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && in ice_set_fd_vsi_ctx()
1135 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) in ice_set_fd_vsi_ctx()
1139 ctxt->info.valid_sections |= cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1147 ctxt->info.fd_options = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1149 ctxt->info.max_fd_fltr_dedicated = in ice_set_fd_vsi_ctx()
1150 cpu_to_le16(vsi->num_gfltr); in ice_set_fd_vsi_ctx()
1152 ctxt->info.max_fd_fltr_shared = in ice_set_fd_vsi_ctx()
1153 cpu_to_le16(vsi->num_bfltr); in ice_set_fd_vsi_ctx()
1160 ctxt->info.fd_def_q = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1167 ctxt->info.fd_report_opt = cpu_to_le16(val); in ice_set_fd_vsi_ctx()
1171 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
1181 pf = vsi->back; in ice_set_rss_vsi_ctx()
1184 switch (vsi->type) { in ice_set_rss_vsi_ctx()
1198 ice_vsi_type_str(vsi->type)); in ice_set_rss_vsi_ctx()
1202 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & in ice_set_rss_vsi_ctx()
1210 struct ice_pf *pf = vsi->back; in ice_chnl_vsi_setup_q_map()
1215 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); in ice_chnl_vsi_setup_q_map()
1223 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in ice_chnl_vsi_setup_q_map()
1224 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); in ice_chnl_vsi_setup_q_map()
1225 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q); in ice_chnl_vsi_setup_q_map()
1226 ctxt->info.q_mapping[1] = cpu_to_le16(qcount); in ice_chnl_vsi_setup_q_map()
1230 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
1237 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_vsi_is_vlan_pruning_ena()
1241 * ice_vsi_init - Create and initialize a VSI
1253 struct ice_pf *pf = vsi->back; in ice_vsi_init()
1254 struct ice_hw *hw = &pf->hw; in ice_vsi_init()
1262 return -ENOMEM; in ice_vsi_init()
1264 switch (vsi->type) { in ice_vsi_init()
1268 ctxt->flags = ICE_AQ_VSI_TYPE_PF; in ice_vsi_init()
1272 ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; in ice_vsi_init()
1275 ctxt->flags = ICE_AQ_VSI_TYPE_VF; in ice_vsi_init()
1276 /* VF number here is the absolute VF number (0-255) */ in ice_vsi_init()
1277 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id; in ice_vsi_init()
1280 ret = -ENODEV; in ice_vsi_init()
1287 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1292 ctxt->info.sw_flags2 |= in ice_vsi_init()
1295 ctxt->info.sw_flags2 &= in ice_vsi_init()
1300 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) in ice_vsi_init()
1303 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) in ice_vsi_init()
1304 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; in ice_vsi_init()
1307 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) && in ice_vsi_init()
1308 vsi->type != ICE_VSI_CTRL) { in ice_vsi_init()
1314 ctxt->info.valid_sections |= in ice_vsi_init()
1318 ctxt->info.sw_id = vsi->port_info->sw_id; in ice_vsi_init()
1319 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_init()
1331 ctxt->info.valid_sections |= in ice_vsi_init()
1336 if (vsi->type == ICE_VSI_PF) { in ice_vsi_init()
1337 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; in ice_vsi_init()
1338 ctxt->info.valid_sections |= in ice_vsi_init()
1343 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1346 ret = -EIO; in ice_vsi_init()
1350 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_init()
1353 ret = -EIO; in ice_vsi_init()
1359 vsi->info = ctxt->info; in ice_vsi_init()
1362 vsi->vsi_num = ctxt->vsi_num; in ice_vsi_init()
1370 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1378 if (vsi->q_vectors) { in ice_vsi_clear_rings()
1380 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_clear_rings()
1383 q_vector->tx.tx_ring = NULL; in ice_vsi_clear_rings()
1384 q_vector->rx.rx_ring = NULL; in ice_vsi_clear_rings()
1389 if (vsi->tx_rings) { in ice_vsi_clear_rings()
1391 if (vsi->tx_rings[i]) { in ice_vsi_clear_rings()
1392 kfree_rcu(vsi->tx_rings[i], rcu); in ice_vsi_clear_rings()
1393 WRITE_ONCE(vsi->tx_rings[i], NULL); in ice_vsi_clear_rings()
1397 if (vsi->rx_rings) { in ice_vsi_clear_rings()
1399 if (vsi->rx_rings[i]) { in ice_vsi_clear_rings()
1400 kfree_rcu(vsi->rx_rings[i], rcu); in ice_vsi_clear_rings()
1401 WRITE_ONCE(vsi->rx_rings[i], NULL); in ice_vsi_clear_rings()
1408 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1413 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); in ice_vsi_alloc_rings()
1414 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_rings()
1429 ring->q_index = i; in ice_vsi_alloc_rings()
1430 ring->reg_idx = vsi->txq_map[i]; in ice_vsi_alloc_rings()
1431 ring->vsi = vsi; in ice_vsi_alloc_rings()
1432 ring->tx_tstamps = &pf->ptp.port.tx; in ice_vsi_alloc_rings()
1433 ring->dev = dev; in ice_vsi_alloc_rings()
1434 ring->count = vsi->num_tx_desc; in ice_vsi_alloc_rings()
1435 ring->txq_teid = ICE_INVAL_TEID; in ice_vsi_alloc_rings()
1437 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2; in ice_vsi_alloc_rings()
1439 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1; in ice_vsi_alloc_rings()
1440 WRITE_ONCE(vsi->tx_rings[i], ring); in ice_vsi_alloc_rings()
1452 ring->q_index = i; in ice_vsi_alloc_rings()
1453 ring->reg_idx = vsi->rxq_map[i]; in ice_vsi_alloc_rings()
1454 ring->vsi = vsi; in ice_vsi_alloc_rings()
1455 ring->netdev = vsi->netdev; in ice_vsi_alloc_rings()
1456 ring->dev = dev; in ice_vsi_alloc_rings()
1457 ring->count = vsi->num_rx_desc; in ice_vsi_alloc_rings()
1458 ring->cached_phctime = pf->ptp.cached_phc_time; in ice_vsi_alloc_rings()
1459 WRITE_ONCE(vsi->rx_rings[i], ring); in ice_vsi_alloc_rings()
1466 return -ENOMEM; in ice_vsi_alloc_rings()
1470 * ice_vsi_manage_rss_lut - disable/enable RSS
1482 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_manage_rss_lut()
1487 if (vsi->rss_lut_user) in ice_vsi_manage_rss_lut()
1488 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
1490 ice_fill_rss_lut(lut, vsi->rss_table_size, in ice_vsi_manage_rss_lut()
1491 vsi->rss_size); in ice_vsi_manage_rss_lut()
1494 ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_manage_rss_lut()
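When no user-supplied LUT exists, ice_fill_rss_lut() spreads the table entries round-robin across the active queues; a sketch of the expected behavior:

void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	/* hash bucket i steers to queue (i % rss_size) */
	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}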
1499 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1509 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1511 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; in ice_vsi_cfg_crc_strip()
1515 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1520 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_rss_lut_key()
1526 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size && in ice_vsi_cfg_rss_lut_key()
1527 (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) { in ice_vsi_cfg_rss_lut_key()
1528 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size); in ice_vsi_cfg_rss_lut_key()
1530 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq); in ice_vsi_cfg_rss_lut_key()
1534 * orig_rss_size so that when tc-qdisc is deleted, main VSI in ice_vsi_cfg_rss_lut_key()
1536 * to begin with (prior to setup-tc for ADQ config) in ice_vsi_cfg_rss_lut_key()
1538 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size && in ice_vsi_cfg_rss_lut_key()
1539 vsi->orig_rss_size <= vsi->num_rxq) { in ice_vsi_cfg_rss_lut_key()
1540 vsi->rss_size = vsi->orig_rss_size; in ice_vsi_cfg_rss_lut_key()
1542 vsi->orig_rss_size = 0; in ice_vsi_cfg_rss_lut_key()
1546 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in ice_vsi_cfg_rss_lut_key()
1548 return -ENOMEM; in ice_vsi_cfg_rss_lut_key()
1550 if (vsi->rss_lut_user) in ice_vsi_cfg_rss_lut_key()
1551 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1553 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); in ice_vsi_cfg_rss_lut_key()
1555 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size); in ice_vsi_cfg_rss_lut_key()
1563 err = -ENOMEM; in ice_vsi_cfg_rss_lut_key()
1567 if (vsi->rss_hkey_user) in ice_vsi_cfg_rss_lut_key()
1568 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE); in ice_vsi_cfg_rss_lut_key()
1583 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1592 struct ice_pf *pf = vsi->back; in ice_vsi_set_vf_rss_flow_fld()
1599 vsi->vsi_num); in ice_vsi_set_vf_rss_flow_fld()
1603 status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); in ice_vsi_set_vf_rss_flow_fld()
1606 vsi->vsi_num, status); in ice_vsi_set_vf_rss_flow_fld()
1610 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1622 u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; in ice_vsi_set_rss_flow_fld()
1623 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_flow_fld()
1624 struct ice_hw *hw = &pf->hw; in ice_vsi_set_rss_flow_fld()
1698 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
1703 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { in ice_vsi_cfg_frame_size()
1704 vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; in ice_vsi_cfg_frame_size()
1705 vsi->rx_buf_len = ICE_RXBUF_1664; in ice_vsi_cfg_frame_size()
1708 (vsi->netdev->mtu <= ETH_DATA_LEN)) { in ice_vsi_cfg_frame_size()
1709 vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; in ice_vsi_cfg_frame_size()
1710 vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; in ice_vsi_cfg_frame_size()
1713 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; in ice_vsi_cfg_frame_size()
1714 vsi->rx_buf_len = ICE_RXBUF_3072; in ice_vsi_cfg_frame_size()
1719 * ice_pf_state_is_nominal - checks the PF for nominal state
1736 if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS)) in ice_pf_state_is_nominal()
1743 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1749 struct ice_hw *hw = &vsi->back->hw; in ice_update_eth_stats()
1750 struct ice_pf *pf = vsi->back; in ice_update_eth_stats()
1751 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */ in ice_update_eth_stats()
1753 prev_es = &vsi->eth_stats_prev; in ice_update_eth_stats()
1754 cur_es = &vsi->eth_stats; in ice_update_eth_stats()
1756 if (ice_is_reset_in_progress(pf->state)) in ice_update_eth_stats()
1757 vsi->stat_offsets_loaded = false; in ice_update_eth_stats()
1759 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1760 &prev_es->rx_bytes, &cur_es->rx_bytes); in ice_update_eth_stats()
1762 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1763 &prev_es->rx_unicast, &cur_es->rx_unicast); in ice_update_eth_stats()
1765 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1766 &prev_es->rx_multicast, &cur_es->rx_multicast); in ice_update_eth_stats()
1768 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1769 &prev_es->rx_broadcast, &cur_es->rx_broadcast); in ice_update_eth_stats()
1771 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1772 &prev_es->rx_discards, &cur_es->rx_discards); in ice_update_eth_stats()
1774 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1775 &prev_es->tx_bytes, &cur_es->tx_bytes); in ice_update_eth_stats()
1777 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1778 &prev_es->tx_unicast, &cur_es->tx_unicast); in ice_update_eth_stats()
1780 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1781 &prev_es->tx_multicast, &cur_es->tx_multicast); in ice_update_eth_stats()
1783 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1784 &prev_es->tx_broadcast, &cur_es->tx_broadcast); in ice_update_eth_stats()
1786 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, in ice_update_eth_stats()
1787 &prev_es->tx_errors, &cur_es->tx_errors); in ice_update_eth_stats()
1789 vsi->stat_offsets_loaded = true; in ice_update_eth_stats()
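The GLV_* counters read above are 40 bits wide and wrap; a hedged sketch of the rollover handling inside ice_stat_update40() (the helper lives outside this file, so this is a reconstruction):

	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	if (!prev_stat_loaded)		/* first read after reset: just latch */
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else				/* counter wrapped since the last read */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
	*prev_stat = new_data;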
1793 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1826 if (q_idx >= vsi->num_rxq) in ice_vsi_cfg_single_rxq()
1827 return -EINVAL; in ice_vsi_cfg_single_rxq()
1829 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); in ice_vsi_cfg_single_rxq()
1837 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) in ice_vsi_cfg_single_txq()
1838 return -EINVAL; in ice_vsi_cfg_single_txq()
1842 return -ENOMEM; in ice_vsi_cfg_single_txq()
1844 qg_buf->num_txqs = 1; in ice_vsi_cfg_single_txq()
1852 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
1862 if (vsi->type == ICE_VSI_VF) in ice_vsi_cfg_rxqs()
1869 int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); in ice_vsi_cfg_rxqs()
1879 * ice_vsi_cfg_txqs - Configure the VSI for Tx
1896 return -ENOMEM; in ice_vsi_cfg_txqs()
1898 qg_buf->num_txqs = 1; in ice_vsi_cfg_txqs()
1912 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
1920 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); in ice_vsi_cfg_lan_txqs()
1924 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1935 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_cfg_xdp_txqs()
1946 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1963 * ice_write_intrl - write throttle rate limit to interrupt specific register
1969 struct ice_hw *hw = &q_vector->vsi->back->hw; in ice_write_intrl()
1971 wr32(hw, GLINT_RATE(q_vector->reg_idx), in ice_write_intrl()
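A sketch of the conversion helper named above, assuming it divides the microsecond limit by the register granularity and sets the hardware enable bit:

static u16 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u16 val = intrl / gran;

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;	/* zero leaves rate limiting disabled */
}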
1977 switch (rc->type) { in ice_pull_qvec_from_rc()
1979 if (rc->rx_ring) in ice_pull_qvec_from_rc()
1980 return rc->rx_ring->q_vector; in ice_pull_qvec_from_rc()
1983 if (rc->tx_ring) in ice_pull_qvec_from_rc()
1984 return rc->tx_ring->q_vector; in ice_pull_qvec_from_rc()
1994 * __ice_write_itr - write throttle rate to register
1996 * @rc: pointer to ring container
2002 struct ice_hw *hw = &q_vector->vsi->back->hw; in __ice_write_itr()
2004 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), in __ice_write_itr()
2009 * ice_write_itr - write throttle rate to queue specific register
2010 * @rc: pointer to ring container
2025 * ice_set_q_vector_intrl - set up interrupt rate limiting
2028 * Interrupt rate limiting is local to the vector, not per-queue so we must
2029 * detect if either ring container has dynamic moderation enabled to decide
2036 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) { in ice_set_q_vector_intrl()
2045 ice_write_intrl(q_vector, q_vector->intrl); in ice_set_q_vector_intrl()
2050 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
2058 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_msix()
2059 struct ice_hw *hw = &pf->hw; in ice_vsi_cfg_msix()
2064 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_cfg_msix()
2065 u16 reg_idx = q_vector->reg_idx; in ice_vsi_cfg_msix()
2074 * For SR-IOV VF VSIs queue vector index always starts in ice_vsi_cfg_msix()
2080 for (q = 0; q < q_vector->num_ring_tx; q++) { in ice_vsi_cfg_msix()
2082 q_vector->tx.itr_idx); in ice_vsi_cfg_msix()
2086 for (q = 0; q < q_vector->num_ring_rx; q++) { in ice_vsi_cfg_msix()
2088 q_vector->rx.itr_idx); in ice_vsi_cfg_msix()
2095 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
2106 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
2117 * ice_vsi_stop_tx_rings - Disable Tx rings
2130 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) in ice_vsi_stop_tx_rings()
2131 return -EINVAL; in ice_vsi_stop_tx_rings()
2138 return -EINVAL; in ice_vsi_stop_tx_rings()
2152 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2161 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); in ice_vsi_stop_lan_tx_rings()
2165 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2170 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_stop_xdp_tx_rings()
2181 struct ice_pf *pf = vsi->back; in ice_vsi_is_rx_queue_active()
2182 struct ice_hw *hw = &pf->hw; in ice_vsi_is_rx_queue_active()
2189 pf_q = vsi->rxq_map[i]; in ice_vsi_is_rx_queue_active()
2200 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { in ice_vsi_set_tc_cfg()
2201 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; in ice_vsi_set_tc_cfg()
2202 vsi->tc_cfg.numtc = 1; in ice_vsi_set_tc_cfg()
2211 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2220 struct ice_pf *pf = vsi->back; in ice_cfg_sw_lldp()
2231 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { in ice_cfg_sw_lldp()
2232 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, in ice_cfg_sw_lldp()
2243 vsi->vsi_num, status); in ice_cfg_sw_lldp()
2247 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2255 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_agg_vsi()
2261 struct ice_pf *pf = vsi->back; in ice_set_agg_vsi()
2267 * - PF aggregator node contains VSIs of type _PF and _CTRL in ice_set_agg_vsi()
2268 * - VF aggregator nodes will contain VF VSI in ice_set_agg_vsi()
2270 port_info = pf->hw.port_info; in ice_set_agg_vsi()
2274 switch (vsi->type) { in ice_set_agg_vsi()
2282 agg_node_iter = &pf->pf_agg_node[0]; in ice_set_agg_vsi()
2293 agg_node_iter = &pf->vf_agg_node[0]; in ice_set_agg_vsi()
2298 ice_vsi_type_str(vsi->type)); in ice_set_agg_vsi()
2307 if (agg_node_iter->num_vsis && in ice_set_agg_vsi()
2308 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { in ice_set_agg_vsi()
2313 if (agg_node_iter->valid && in ice_set_agg_vsi()
2314 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2315 agg_id = agg_node_iter->agg_id; in ice_set_agg_vsi()
2321 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) { in ice_set_agg_vsi()
2334 if (!agg_node->valid) { in ice_set_agg_vsi()
2336 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2343 agg_node->valid = true; in ice_set_agg_vsi()
2344 agg_node->agg_id = agg_id; in ice_set_agg_vsi()
2348 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx, in ice_set_agg_vsi()
2349 (u8)vsi->tc_cfg.ena_tc); in ice_set_agg_vsi()
2352 vsi->idx, agg_id); in ice_set_agg_vsi()
2357 agg_node->num_vsis++; in ice_set_agg_vsi()
2359 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved in ice_set_agg_vsi()
2362 vsi->agg_node = agg_node; in ice_set_agg_vsi()
2364 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id, in ice_set_agg_vsi()
2365 vsi->agg_node->num_vsis); in ice_set_agg_vsi()
2376 if (!(vsi->tc_cfg.ena_tc & BIT(i))) in ice_vsi_cfg_tc_lan()
2379 if (vsi->type == ICE_VSI_CHNL) { in ice_vsi_cfg_tc_lan()
2380 if (!vsi->alloc_txq && vsi->num_txq) in ice_vsi_cfg_tc_lan()
2381 max_txqs[i] = vsi->num_txq; in ice_vsi_cfg_tc_lan()
2383 max_txqs[i] = pf->num_lan_tx; in ice_vsi_cfg_tc_lan()
2385 max_txqs[i] = vsi->alloc_txq; in ice_vsi_cfg_tc_lan()
2389 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); in ice_vsi_cfg_tc_lan()
2390 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, in ice_vsi_cfg_tc_lan()
2394 vsi->vsi_num, ret); in ice_vsi_cfg_tc_lan()
2402 * ice_vsi_cfg_def - configure default VSI based on the type
2409 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_cfg_def()
2410 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_def()
2413 vsi->vsw = pf->first_sw; in ice_vsi_cfg_def()
2415 ret = ice_vsi_alloc_def(vsi, params->ch); in ice_vsi_cfg_def()
2428 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", in ice_vsi_cfg_def()
2429 vsi->idx); in ice_vsi_cfg_def()
2440 ret = ice_vsi_init(vsi, params->flags); in ice_vsi_cfg_def()
2446 switch (vsi->type) { in ice_vsi_cfg_def()
2463 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2469 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); in ice_vsi_cfg_def()
2475 if (vsi->type != ICE_VSI_CTRL) in ice_vsi_cfg_def()
2480 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2487 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2510 vsi->stat_offsets_loaded = false; in ice_vsi_cfg_def()
2516 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { in ice_vsi_cfg_def()
2533 ret = -EINVAL; in ice_vsi_cfg_def()
2555 * ice_vsi_cfg - configure a previously allocated VSI
2561 struct ice_pf *pf = vsi->back; in ice_vsi_cfg()
2564 if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) in ice_vsi_cfg()
2565 return -EINVAL; in ice_vsi_cfg()
2567 vsi->type = params->type; in ice_vsi_cfg()
2568 vsi->port_info = params->pi; in ice_vsi_cfg()
2571 vsi->vf = params->vf; in ice_vsi_cfg()
2577 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); in ice_vsi_cfg()
2581 if (vsi->type == ICE_VSI_CTRL) { in ice_vsi_cfg()
2582 if (vsi->vf) { in ice_vsi_cfg()
2583 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2584 vsi->vf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2586 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); in ice_vsi_cfg()
2587 pf->ctrl_vsi_idx = vsi->idx; in ice_vsi_cfg()
2595 * ice_vsi_decfg - remove all VSI configuration
2600 struct ice_pf *pf = vsi->back; in ice_vsi_decfg()
2606 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && in ice_vsi_decfg()
2607 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) in ice_vsi_decfg()
2610 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2611 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); in ice_vsi_decfg()
2614 vsi->vsi_num, err); in ice_vsi_decfg()
2627 /* SR-IOV determines needed MSIX resources all at once instead of per in ice_vsi_decfg()
2629 * many interrupts each VF needs. SR-IOV MSIX resources are also in ice_vsi_decfg()
2633 if (vsi->type == ICE_VSI_VF && in ice_vsi_decfg()
2634 vsi->agg_node && vsi->agg_node->valid) in ice_vsi_decfg()
2635 vsi->agg_node->num_vsis--; in ice_vsi_decfg()
2636 if (vsi->agg_node) { in ice_vsi_decfg()
2637 vsi->agg_node->valid = false; in ice_vsi_decfg()
2638 vsi->agg_node->agg_id = 0; in ice_vsi_decfg()
2643 * ice_vsi_setup - Set up a VSI by a given type
2662 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || in ice_vsi_setup()
2663 WARN_ON(!params->pi)) in ice_vsi_setup()
2681 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to in ice_vsi_setup()
2685 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { in ice_vsi_setup()
2691 if (!vsi->agg_node) in ice_vsi_setup()
2703 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2708 struct ice_pf *pf = vsi->back; in ice_vsi_release_msix()
2709 struct ice_hw *hw = &pf->hw; in ice_vsi_release_msix()
2715 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_release_msix()
2718 for (q = 0; q < q_vector->num_ring_tx; q++) { in ice_vsi_release_msix()
2719 ice_write_itr(&q_vector->tx, 0); in ice_vsi_release_msix()
2720 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); in ice_vsi_release_msix()
2722 u32 xdp_txq = txq + vsi->num_xdp_txq; in ice_vsi_release_msix()
2724 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); in ice_vsi_release_msix()
2729 for (q = 0; q < q_vector->num_ring_rx; q++) { in ice_vsi_release_msix()
2730 ice_write_itr(&q_vector->rx, 0); in ice_vsi_release_msix()
2731 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0); in ice_vsi_release_msix()
2740 * ice_vsi_free_irq - Free the IRQ association with the OS
2745 struct ice_pf *pf = vsi->back; in ice_vsi_free_irq()
2748 if (!vsi->q_vectors || !vsi->irqs_ready) in ice_vsi_free_irq()
2752 if (vsi->type == ICE_VSI_VF) in ice_vsi_free_irq()
2755 vsi->irqs_ready = false; in ice_vsi_free_irq()
2761 irq_num = vsi->q_vectors[i]->irq.virq; in ice_vsi_free_irq()
2764 if (!vsi->q_vectors[i] || in ice_vsi_free_irq()
2765 !(vsi->q_vectors[i]->num_ring_tx || in ice_vsi_free_irq()
2766 vsi->q_vectors[i]->num_ring_rx)) in ice_vsi_free_irq()
2776 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); in ice_vsi_free_irq()
2781 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2788 if (!vsi->tx_rings) in ice_vsi_free_tx_rings()
2792 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_vsi_free_tx_rings()
2793 ice_free_tx_ring(vsi->tx_rings[i]); in ice_vsi_free_tx_rings()
2797 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2804 if (!vsi->rx_rings) in ice_vsi_free_rx_rings()
2808 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in ice_vsi_free_rx_rings()
2809 ice_free_rx_ring(vsi->rx_rings[i]); in ice_vsi_free_rx_rings()
2813 * ice_vsi_close - Shut down a VSI
2818 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_vsi_close()
2827 * ice_ena_vsi - resume a VSI
2835 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state)) in ice_ena_vsi()
2838 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_ena_vsi()
2840 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_ena_vsi()
2841 if (netif_running(vsi->netdev)) { in ice_ena_vsi()
2845 err = ice_open_internal(vsi->netdev); in ice_ena_vsi()
2850 } else if (vsi->type == ICE_VSI_CTRL) { in ice_ena_vsi()
2858 * ice_dis_vsi - pause a VSI
2864 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_dis_vsi()
2867 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_dis_vsi()
2869 if (vsi->type == ICE_VSI_PF && vsi->netdev) { in ice_dis_vsi()
2870 if (netif_running(vsi->netdev)) { in ice_dis_vsi()
2881 } else if (vsi->type == ICE_VSI_CTRL || in ice_dis_vsi()
2882 vsi->type == ICE_VSI_SWITCHDEV_CTRL) { in ice_dis_vsi()
2888 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
2889 * @vsi: the VSI being un-configured
2893 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq()
2894 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
2899 if (vsi->tx_rings) { in ice_vsi_dis_irq()
2901 if (vsi->tx_rings[i]) { in ice_vsi_dis_irq()
2904 reg = vsi->tx_rings[i]->reg_idx; in ice_vsi_dis_irq()
2912 if (vsi->rx_rings) { in ice_vsi_dis_irq()
2914 if (vsi->rx_rings[i]) { in ice_vsi_dis_irq()
2917 reg = vsi->rx_rings[i]->reg_idx; in ice_vsi_dis_irq()
2927 if (!vsi->q_vectors[i]) in ice_vsi_dis_irq()
2929 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); in ice_vsi_dis_irq()
2935 if (vsi->type == ICE_VSI_VF) in ice_vsi_dis_irq()
2939 synchronize_irq(vsi->q_vectors[i]->irq.virq); in ice_vsi_dis_irq()
2943 * ice_vsi_release - Delete a VSI and free its resources
2952 if (!vsi->back) in ice_vsi_release()
2953 return -ENODEV; in ice_vsi_release()
2954 pf = vsi->back; in ice_vsi_release()
2956 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) in ice_vsi_release()
2966 if (!ice_is_reset_in_progress(pf->state)) in ice_vsi_release()
2973 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2986 struct ice_q_vector *q_vector = vsi->q_vectors[i]; in ice_vsi_rebuild_get_coalesce()
2988 coalesce[i].itr_tx = q_vector->tx.itr_settings; in ice_vsi_rebuild_get_coalesce()
2989 coalesce[i].itr_rx = q_vector->rx.itr_settings; in ice_vsi_rebuild_get_coalesce()
2990 coalesce[i].intrl = q_vector->intrl; in ice_vsi_rebuild_get_coalesce()
2992 if (i < vsi->num_txq) in ice_vsi_rebuild_get_coalesce()
2994 if (i < vsi->num_rxq) in ice_vsi_rebuild_get_coalesce()
2998 return vsi->num_q_vectors; in ice_vsi_rebuild_get_coalesce()
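The coalesce[] entries saved above presumably carry per-vector ITR and INTRL state plus validity flags; a sketch of the assumed layout:

struct ice_coalesce_stored {
	u16 itr_tx;	/* Tx ring container itr_settings */
	u16 itr_rx;	/* Rx ring container itr_settings */
	u8 intrl;	/* hard interrupt rate limit */
	u8 tx_valid;	/* vector had a Tx ring when saved */
	u8 rx_valid;	/* vector had an Rx ring when saved */
};

The valid flags are what lets the restore path below fall back to coalesce[0] for vectors that only gained a ring across the rebuild.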
3002 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
3027 for (i = 0; i < size && i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
3043 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) { in ice_vsi_rebuild_set_coalesce()
3044 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3045 rc->itr_settings = coalesce[i].itr_rx; in ice_vsi_rebuild_set_coalesce()
3046 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3047 } else if (i < vsi->alloc_rxq) { in ice_vsi_rebuild_set_coalesce()
3048 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3049 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
3050 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3053 if (i < vsi->alloc_txq && coalesce[i].tx_valid) { in ice_vsi_rebuild_set_coalesce()
3054 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3055 rc->itr_settings = coalesce[i].itr_tx; in ice_vsi_rebuild_set_coalesce()
3056 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3057 } else if (i < vsi->alloc_txq) { in ice_vsi_rebuild_set_coalesce()
3058 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3059 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
3060 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3063 vsi->q_vectors[i]->intrl = coalesce[i].intrl; in ice_vsi_rebuild_set_coalesce()
3064 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
3070 for (; i < vsi->num_q_vectors; i++) { in ice_vsi_rebuild_set_coalesce()
3072 rc = &vsi->q_vectors[i]->tx; in ice_vsi_rebuild_set_coalesce()
3073 rc->itr_settings = coalesce[0].itr_tx; in ice_vsi_rebuild_set_coalesce()
3074 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3077 rc = &vsi->q_vectors[i]->rx; in ice_vsi_rebuild_set_coalesce()
3078 rc->itr_settings = coalesce[0].itr_rx; in ice_vsi_rebuild_set_coalesce()
3079 ice_write_itr(rc, rc->itr_setting); in ice_vsi_rebuild_set_coalesce()
3081 vsi->q_vectors[i]->intrl = coalesce[0].intrl; in ice_vsi_rebuild_set_coalesce()
3082 ice_set_q_vector_intrl(vsi->q_vectors[i]); in ice_vsi_rebuild_set_coalesce()
3087 * ice_vsi_realloc_stat_arrays - Frees unused stat structures
3096 struct ice_pf *pf = vsi->back; in ice_vsi_realloc_stat_arrays()
3101 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_realloc_stat_arrays()
3104 vsi_stat = pf->vsi_stats[vsi->idx]; in ice_vsi_realloc_stat_arrays()
3106 if (vsi->num_txq < prev_txq) { in ice_vsi_realloc_stat_arrays()
3107 for (i = vsi->num_txq; i < prev_txq; i++) { in ice_vsi_realloc_stat_arrays()
3108 if (vsi_stat->tx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
3109 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
3110 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
3115 if (vsi->num_rxq < prev_rxq) { in ice_vsi_realloc_stat_arrays()
3116 for (i = vsi->num_rxq; i < prev_rxq; i++) { in ice_vsi_realloc_stat_arrays()
3117 if (vsi_stat->rx_ring_stats[i]) { in ice_vsi_realloc_stat_arrays()
3118 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); in ice_vsi_realloc_stat_arrays()
3119 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); in ice_vsi_realloc_stat_arrays()
3126 * ice_vsi_rebuild - Rebuild VSI after reset
3144 return -EINVAL; in ice_vsi_rebuild()
3149 pf = vsi->back; in ice_vsi_rebuild()
3150 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) in ice_vsi_rebuild()
3151 return -EINVAL; in ice_vsi_rebuild()
3153 coalesce = kcalloc(vsi->num_q_vectors, in ice_vsi_rebuild()
3156 return -ENOMEM; in ice_vsi_rebuild()
3160 prev_txq = vsi->num_txq; in ice_vsi_rebuild()
3161 prev_rxq = vsi->num_rxq; in ice_vsi_rebuild()
3171 ret = -EIO; in ice_vsi_rebuild()
3194 * ice_is_reset_in_progress - check for a reset in progress
3206 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3215 * Returns 0 on success, -EBUSY if the reset is not finished within the
3216 * timeout, and -ERESTARTSYS if the thread was interrupted.
3222 ret = wait_event_interruptible_timeout(pf->reset_wait_queue, in ice_wait_for_reset()
3223 !ice_is_reset_in_progress(pf->state), in ice_wait_for_reset()
3228 return -EBUSY; in ice_wait_for_reset()
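A sketch of how the wait_event_interruptible_timeout() result maps onto the return codes documented above (negative means interrupted, zero means timeout, positive means the condition became true):

	if (ret < 0)
		return ret;	/* -ERESTARTSYS: interrupted by a signal */
	else if (!ret)
		return -EBUSY;	/* timed out waiting for the rebuild */
	else
		return 0;	/* reset finished */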
3234 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3240 vsi->info.mapping_flags = ctx->info.mapping_flags; in ice_vsi_update_q_map()
3241 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, in ice_vsi_update_q_map()
3242 sizeof(vsi->info.q_mapping)); in ice_vsi_update_q_map()
3243 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, in ice_vsi_update_q_map()
3244 sizeof(vsi->info.tc_mapping)); in ice_vsi_update_q_map()
3248 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3254 struct net_device *netdev = vsi->netdev; in ice_vsi_cfg_netdev_tc()
3255 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
3256 int numtc = vsi->tc_cfg.numtc; in ice_vsi_cfg_netdev_tc()
3265 if (vsi->type == ICE_VSI_CHNL) in ice_vsi_cfg_netdev_tc()
3273 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf)) in ice_vsi_cfg_netdev_tc()
3274 numtc = vsi->all_numtc; in ice_vsi_cfg_netdev_tc()
3279 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_vsi_cfg_netdev_tc()
3282 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_cfg_netdev_tc()
3284 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_cfg_netdev_tc()
3285 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_cfg_netdev_tc()
3286 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_cfg_netdev_tc()
3289 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_cfg_netdev_tc()
3291 if (!vsi->mqprio_qopt.qopt.count[i]) in ice_vsi_cfg_netdev_tc()
3294 vsi->mqprio_qopt.qopt.count[i], in ice_vsi_cfg_netdev_tc()
3295 vsi->mqprio_qopt.qopt.offset[i]); in ice_vsi_cfg_netdev_tc()
3298 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_vsi_cfg_netdev_tc()
3302 u8 ets_tc = dcbcfg->etscfg.prio_table[i]; in ice_vsi_cfg_netdev_tc()
3305 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; in ice_vsi_cfg_netdev_tc()
3311 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
3323 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; in ice_vsi_setup_q_map_mqprio()
3324 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; in ice_vsi_setup_q_map_mqprio()
3329 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1; in ice_vsi_setup_q_map_mqprio()
3337 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { in ice_vsi_setup_q_map_mqprio()
3339 vsi->tc_cfg.tc_info[i].qoffset = 0; in ice_vsi_setup_q_map_mqprio()
3340 vsi->tc_cfg.tc_info[i].qcount_rx = 1; in ice_vsi_setup_q_map_mqprio()
3341 vsi->tc_cfg.tc_info[i].qcount_tx = 1; in ice_vsi_setup_q_map_mqprio()
3342 vsi->tc_cfg.tc_info[i].netdev_tc = 0; in ice_vsi_setup_q_map_mqprio()
3343 ctxt->info.tc_mapping[i] = 0; in ice_vsi_setup_q_map_mqprio()
3347 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3348 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3349 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3350 vsi->tc_cfg.tc_info[i].qoffset = offset; in ice_vsi_setup_q_map_mqprio()
3351 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; in ice_vsi_setup_q_map_mqprio()
3352 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx; in ice_vsi_setup_q_map_mqprio()
3353 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; in ice_vsi_setup_q_map_mqprio()
3356 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) { in ice_vsi_setup_q_map_mqprio()
3358 if (!(vsi->all_enatc & BIT(i))) in ice_vsi_setup_q_map_mqprio()
3360 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_vsi_setup_q_map_mqprio()
3361 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3362 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_vsi_setup_q_map_mqprio()
3367 if (new_txq > vsi->alloc_txq) { in ice_vsi_setup_q_map_mqprio()
3368 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3369 new_txq, vsi->alloc_txq); in ice_vsi_setup_q_map_mqprio()
3370 return -EINVAL; in ice_vsi_setup_q_map_mqprio()
3374 if (new_rxq > vsi->alloc_rxq) { in ice_vsi_setup_q_map_mqprio()
3375 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3376 new_rxq, vsi->alloc_rxq); in ice_vsi_setup_q_map_mqprio()
3377 return -EINVAL; in ice_vsi_setup_q_map_mqprio()

	/* Set actual Tx/Rx queue pairs */
	vsi->num_txq = new_txq;
	vsi->num_rxq = new_rxq;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
		vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
		vsi->next_base_q = tc0_qcount;
	}
	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
	dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
		vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);

	return 0;
}
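
/* Editor's note (illustrative, not upstream code): the TC0 mapping built
 * above packs a queue offset and a power-of-2 queue count into one 16-bit
 * qmap word. A minimal sketch, assuming TC0 starts at queue 0 with 6 queues:
 *
 *	pow = order_base_2(6);	// 3, since 2^3 = 8 >= 6
 *	qmap = ((0 << ICE_AQ_VSI_TC_Q_OFFSET_S) & ICE_AQ_VSI_TC_Q_OFFSET_M) |
 *	       ((3 << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
 *
 * i.e. the hardware contiguous-queue window covers queues 0..7 even though
 * only 6 of them carry traffic.
 */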

/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues expected to be quiesced before calling this function.
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_tc_cfg old_tc_cfg;
	struct ice_vsi_ctx *ctx;
	struct device *dev;
	int i, ret = 0;
	u8 num_tc = 0;

	dev = ice_pf_to_dev(pf);
	if (vsi->tc_cfg.ena_tc == ena_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
		/* Update max_txqs if it is CHNL VSI, because alloc_txq is
		 * zero for CHNL VSI, hence use num_txq instead as max_txqs
		 */
		if (vsi->type == ICE_VSI_CHNL &&
		    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
			max_txqs[i] = vsi->num_txq;
	}

	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	if (vsi->type == ICE_VSI_PF &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
	else
		ret = ice_vsi_setup_q_map(vsi, ctx);

	if (ret) {
		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
		goto out;
	}

	/* indicate which sections of the VSI context are being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (ret) {
		dev_info(dev, "Failed VSI Update\n");
		goto out;
	}

	if (vsi->type == ICE_VSI_PF &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
	else
		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
				      vsi->tc_cfg.ena_tc, max_txqs);
	if (ret) {
		dev_err(dev, "VSI %d failed TC config, error %d\n",
			vsi->vsi_num, ret);
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	kfree(ctx);
	return ret;
}
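
/* Editor's sketch of a hypothetical caller (not from this file): ena_tc is
 * a bitmap, so enabling TC0 and TC1 on a quiesced VSI would look roughly
 * like this:
 *
 *	u8 ena_tc = BIT(0) | BIT(1);
 *	int err = ice_vsi_cfg_tc(vsi, ena_tc);
 *
 *	if (err)
 *		dev_err(ice_pf_to_dev(vsi->back),
 *			"TC config failed, error %d\n", err);
 */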

/**
 * ice_update_ring_stats - Update ring statistics
 * @stats: stats to be updated
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that caller has acquired a u64_stats sync lock.
 */
static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{
	stats->bytes += bytes;
	stats->pkts += pkts;
}

/**
 * ice_update_tx_ring_stats - Update Tx ring specific counters
 * @tx_ring: ring to update on
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
	u64_stats_update_end(&tx_ring->ring_stats->syncp);
}

/**
 * ice_update_rx_ring_stats - Update Rx ring specific counters
 * @rx_ring: ring to update on
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
	u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
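
/* Editor's sketch (assumed reader side, not shown in this fragment):
 * counters written under u64_stats_update_begin()/end() are read back with
 * the matching fetch/retry loop so 64-bit values stay consistent on 32-bit
 * kernels:
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring_stats->syncp);
 *		pkts = ring_stats->stats.pkts;
 *		bytes = ring_stats->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring_stats->syncp, start));
 */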

/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @pi: port info of the switch with default VSI
 *
 * Return true if there is a single VSI in the default forwarding VSI list
 */
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
	bool exists = false;

	ice_check_if_dflt_vsi(pi, 0, &exists);
	return exists;
}

/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @vsi: VSI to compare against default forwarding VSI
 *
 * Return true if this VSI is the default forwarding VSI, false otherwise.
 */
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}

/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI just return success;
 * otherwise try to set it as the switch's default VSI and return the result.
 */
int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	if (ice_lag_is_switchdev_running(vsi->back)) {
		dev_dbg(dev, "VSI %d is part of a LAG containing interfaces in switchdev mode, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(vsi)) {
		dev_dbg(dev, "VSI %d is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
			vsi->vsi_num, status);
		return status;
	}

	return 0;
}

/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @vsi: VSI to remove from filter list
 *
 * Try to clear the current default forwarding VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* there is no default VSI currently set */
	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
			vsi->vsi_num, status);
		return -EIO;
	}

	return 0;
}
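
/* Editor's sketch of hypothetical usage (not from this fragment): a
 * promiscuous-mode path typically brackets the default-VSI Rx filter:
 *
 *	if (!ice_is_vsi_dflt_vsi(vsi))
 *		err = ice_set_dflt_vsi(vsi);	// steer unmatched Rx here
 *	...
 *	err = ice_clear_dflt_vsi(vsi);		// restore normal forwarding
 */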

/**
 * ice_get_link_speed_mbps - get link speed in Mbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return current VSI link speed and 0 if the speed is unknown.
 */
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
	unsigned int link_speed;

	link_speed = vsi->port_info->phy.link_info.link_speed;

	return (int)ice_get_link_speed(fls(link_speed) - 1);
}
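
/* Editor's note: phy.link_info.link_speed is a one-hot ICE_AQ_LINK_SPEED_*
 * bitmask, so fls(link_speed) - 1 converts the set bit's position into an
 * index for the ice_get_link_speed() lookup table: if only BIT(n) is set,
 * fls() returns n + 1 and the table index is n.
 */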

/**
 * ice_get_link_speed_kbps - get link speed in Kbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return current VSI link speed and 0 if the speed is unknown.
 */
int ice_get_link_speed_kbps(struct ice_vsi *vsi)
{
	int speed_mbps;

	speed_mbps = ice_get_link_speed_mbps(vsi);

	return speed_mbps * 1000;
}

/**
 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
 * @vsi: VSI to be configured
 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
 *
 * If the min_tx_rate is set as 0 the VSI will be configured as default
 * profile, otherwise a non-zero value will force a minimum BW limit for the
 * VSI. Return 0 on success, negative value on failure.
 */
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (min_tx_rate > (u64)speed) {
		dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure min BW for VSI limit */
	if (min_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MIN_BW, min_tx_rate);
		if (status) {
			dev_err(dev, "failed to set min Tx rate (%llu Kbps) for %s %d\n",
				min_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set min Tx rate (%llu Kbps) for %s\n",
			min_tx_rate, ice_vsi_type_str(vsi->type));
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MIN_BW);
		if (status) {
			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}

/**
 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
 *
 * If the max_tx_rate is set as 0 the VSI will be configured as default
 * profile, otherwise a non-zero value will force a maximum BW limit for the
 * VSI. Return 0 on success, negative value on failure.
 */
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (max_tx_rate > (u64)speed) {
		dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure max BW for VSI limit */
	if (max_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MAX_BW, max_tx_rate);
		if (status) {
			dev_err(dev, "failed setting max Tx rate (%llu Kbps) for %s %d\n",
				max_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set max Tx rate (%llu Kbps) for %s %d\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MAX_BW);
		if (status) {
			dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}
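
/* Editor's sketch of a hypothetical caller (not from this fragment): TC
 * rate parameters usually arrive from userspace in bytes/s (e.g. mqprio
 * min_rate/max_rate), while these helpers take Kbps, so a caller would
 * convert first:
 *
 *	u64 max_tx_rate = div_u64(rate_bytes_per_sec * 8, 1000);
 *	int err = ice_set_max_bw_limit(vsi, max_tx_rate);
 *
 *	if (!err)
 *		err = ice_set_min_bw_limit(vsi, min_tx_rate);
 */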

/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 */
int ice_set_link(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_hw *hw = pi->hw;
	int status;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	status = ice_aq_set_link_restart_an(pi, ena, NULL);

	/* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
	 * this is not a fatal error, so print a warning message and return
	 * a success code. Return an error if FW returns an error code other
	 * than ICE_AQ_RC_EMODE
	 */
	if (status == -EIO) {
		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
				(ena ? "ON" : "OFF"), status,
				ice_aq_str(hw->adminq.sq_last_status));
	} else if (status) {
		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
			(ena ? "ON" : "OFF"), status,
			ice_aq_str(hw->adminq.sq_last_status));
		return status;
	}

	return 0;
}
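
/* Editor's note (hypothetical usage): an admin path taking the PF link
 * down and back up would call ice_set_link(vsi, false) and then
 * ice_set_link(vsi, true); an ICE_AQ_RC_EMODE result is deliberately
 * swallowed above because manageability firmware owns the link then.
 */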

/**
 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * A VLAN 0 + no-TPID filter handles untagged (and, in SVM, priority tagged)
 * traffic. If Double VLAN Mode (DVM) is enabled, an explicit 0x8100 + VLAN 0
 * filter is also needed, since the VLAN TPID is part of filtering there.
 */
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

/**
 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to delete VLAN filters
 *
 * Delete the VLAN 0 filters in the same manner that they were added in
 * ice_vsi_add_vlan_zero.
 */
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* when deleting the last VLAN filter, make sure to disable the VLAN
	 * promisc mode so the filter isn't left by accident
	 */
	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
}

/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for details.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}

/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
}
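
/* Editor's worked example (illustrative): with DVM enabled a VSI carries
 * two VLAN 0 filters, so a VSI with num_vlan == 3 has
 * ice_vsi_num_non_zero_vlans() == 1; in SVM the same num_vlan yields 2,
 * since only one VLAN 0 filter exists.
 */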

/**
 * ice_is_feature_supported - check if a feature is supported
 * @pf: board private structure to check
 * @f: feature enum to check
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}

/**
 * ice_set_feature_support - set a feature as supported
 * @pf: board private structure to modify
 * @f: feature enum to set
 */
static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	set_bit(f, pf->features);
}

/**
 * ice_clear_feature_support - clear a feature as supported
 * @pf: board private structure to modify
 * @f: feature enum to clear
 */
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return;

	clear_bit(f, pf->features);
}
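
/* Editor's sketch of typical use (hypothetical call site): feature bits
 * gate optional hardware paths, e.g.
 *
 *	if (ice_is_feature_supported(pf, ICE_F_GNSS))
 *		ice_gnss_init(pf);
 */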

/**
 * ice_init_feature_support - percolate feature support
 * @pf: board private structure to update
 */
void ice_init_feature_support(struct ice_pf *pf)
{
	switch (pf->hw.device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
		ice_set_feature_support(pf, ICE_F_DSCP);
		ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
		if (ice_is_e810t(&pf->hw)) {
			ice_set_feature_support(pf, ICE_F_SMA_CTRL);
			if (ice_gnss_is_gps_present(&pf->hw))
				ice_set_feature_support(pf, ICE_F_GNSS);
		}
		break;
	default:
		break;
	}
}

/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}

/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}

/**
 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
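
/* Editor's sketch (hypothetical call site): these ctx helpers are meant to
 * be passed as the fill callback of ice_vsi_update_security(), e.g. to
 * toggle VF spoof checking:
 *
 *	err = ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 *	...
 *	err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
 */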

/**
 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
 * @vsi: pointer to VSI structure
 * @set: set or unset the bit
 */
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
{
	struct ice_vsi_ctx ctx = {
		.info = vsi->info,
	};

	ctx.info.valid_sections |= cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	if (set)
		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
	else
		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}
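
/* Editor's note (usage assumed from context): local loopback lets traffic
 * from this VSI be switched back to itself; a switchdev-style caller would
 * enable it with ice_vsi_update_local_lb(vsi, true) and later disable it
 * with ice_vsi_update_local_lb(vsi, false).
 */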