// SPDX-License-Identifier: GPL-2.0
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
for (i = 0; i < vsi->num_rxq; i++)
ice_flush(&vsi->back->hw);
for (i = 0; i < vsi->num_rxq; i++) {
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
struct ice_pf *pf = vsi->back;
vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
sizeof(*vsi->tx_rings), GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
sizeof(*vsi->rx_rings), GFP_KERNEL);
if (!vsi->rx_rings)
/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map)
vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
sizeof(*vsi->rxq_map), GFP_KERNEL);
if (!vsi->rxq_map)
if (vsi->type == ICE_VSI_LB)
vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
sizeof(*vsi->q_vectors), GFP_KERNEL);
if (!vsi->q_vectors)
vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
if (!vsi->af_xdp_zc_qps)
devm_kfree(dev, vsi->q_vectors);
devm_kfree(dev, vsi->rxq_map);
devm_kfree(dev, vsi->txq_map);
devm_kfree(dev, vsi->rx_rings);
devm_kfree(dev, vsi->tx_rings);
return -ENOMEM;
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
switch (vsi->type) {
 * ethtool -G so we should keep those values instead of
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
if (!vsi->num_tx_desc)
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
vsi->type);
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
struct ice_pf *pf = vsi->back;
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
vsi->vf_id = ICE_INVAL_VFID;
switch (vsi->type) {
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
vsi->alloc_txq = min3(pf->num_lan_msix,
pf->num_lan_tx = vsi->alloc_txq;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
vsi->alloc_rxq = min3(pf->num_lan_msix,
pf->num_lan_rx = vsi->alloc_rxq;
vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_msix_per_vf includes (VF miscellaneous vector +
 * data queue interrupts). Since vsi->num_q_vectors is number
vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1;
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
 * ice_get_free_slot - get the next non-NULL location index in array
if (curr < (size - 1) && !tmp_array[curr + 1]) {
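/*
 * Illustrative sketch only: the excerpt above keeps just the fast-path check
 * of ice_get_free_slot(). A helper of this shape typically falls back to a
 * linear scan of the array when the slot right after 'curr' is occupied.
 * The stand-in below is an assumption about that shape, not the driver's
 * actual implementation; names and the "no slot" return value are invented.
 */
static int example_get_free_slot(void **tmp_array, u16 size, u16 curr)
{
	u16 i;

	/* fast path: the slot right after the current one is free */
	if (curr < (size - 1) && !tmp_array[curr + 1])
		return curr + 1;

	/* slow path: scan for the first free (NULL) slot */
	for (i = 0; i < size; i++)
		if (!tmp_array[i])
			return i;

	return -ENOMEM; /* sketch: callers treat a negative value as "none free" */
}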
 * ice_vsi_delete - delete a VSI from the switch
struct ice_pf *pf = vsi->back;
if (vsi->type == ICE_VSI_VF)
ctxt->vf_num = vsi->vf_id;
ctxt->vsi_num = vsi->vsi_num;
memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
vsi->vsi_num, ice_stat_str(status));
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
struct ice_pf *pf = vsi->back;
if (vsi->af_xdp_zc_qps) {
bitmap_free(vsi->af_xdp_zc_qps);
vsi->af_xdp_zc_qps = NULL;
if (vsi->q_vectors) {
devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
if (vsi->tx_rings) {
devm_kfree(dev, vsi->tx_rings);
vsi->tx_rings = NULL;
if (vsi->rx_rings) {
devm_kfree(dev, vsi->rx_rings);
vsi->rx_rings = NULL;
if (vsi->txq_map) {
devm_kfree(dev, vsi->txq_map);
vsi->txq_map = NULL;
if (vsi->rxq_map) {
devm_kfree(dev, vsi->rxq_map);
vsi->rxq_map = NULL;
 * ice_vsi_clear - clean up and deallocate the provided VSI
if (!vsi->back)
return -EINVAL;
pf = vsi->back;
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
return -EINVAL;
mutex_lock(&pf->sw_mutex);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
vsi->vf_id != ICE_INVAL_VFID)
pf->next_vsi = vsi->idx;
mutex_unlock(&pf->sw_mutex);
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
if (!q_vector->tx.ring)
ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
ice_clean_ctrl_tx_irq(q_vector->tx.ring);
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
if (!q_vector->tx.ring && !q_vector->rx.ring)
q_vector->total_events++;
napi_schedule(&q_vector->napi);
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
mutex_lock(&pf->sw_mutex);
 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
if (pf->next_vsi == ICE_NO_VSI) {
vsi->type = vsi_type;
vsi->back = pf;
set_bit(ICE_VSI_DOWN, vsi->state);
switch (vsi->type) {
vsi->irq_handler = ice_msix_clean_rings;
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
pf->vsi[vsi->idx] = vsi;
vsi->idx = pf->next_vsi;
pf->vsi[pf->next_vsi] = vsi;
/* prepare pf->next_vsi for next use */
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
mutex_unlock(&pf->sw_mutex);
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * Returns 0 on success, -EPERM on no-op or -EIO on failure
struct ice_pf *pf = vsi->back;
g_val = pf->hw.func_caps.fd_fltr_guar;
return -EPERM;
b_val = pf->hw.func_caps.fd_fltr_best_effort;
return -EPERM;
if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
return -EPERM;
if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
return -EPERM;
vsi->num_gfltr = g_val / pf->num_alloc_vsi;
vsi->num_bfltr = b_val;
if (vsi->type == ICE_VSI_VF) {
vsi->num_gfltr = 0;
vsi->num_bfltr = b_val;
 * ice_vsi_get_qs - Assign queues from PF to VSI
struct ice_pf *pf = vsi->back;
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_txqs,
.pf_map_size = pf->max_pf_txqs,
.q_count = vsi->alloc_txq,
.vsi_map = vsi->txq_map,
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_rxqs,
.pf_map_size = pf->max_pf_rxqs,
.q_count = vsi->alloc_rxq,
.vsi_map = vsi->rxq_map,
vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
 * ice_vsi_put_qs - Release queues from VSI to PF
struct ice_pf *pf = vsi->back;
mutex_lock(&pf->avail_q_mutex);
for (i = 0; i < vsi->alloc_txq; i++) {
clear_bit(vsi->txq_map[i], pf->avail_txqs);
vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
for (i = 0; i < vsi->alloc_rxq; i++) {
clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
mutex_unlock(&pf->avail_q_mutex);
return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
struct ice_pf *pf = vsi->back;
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
vsi->vsi_num, ice_stat_str(status));
 * ice_rss_clean - Delete RSS related VSI structures and configuration
struct ice_pf *pf = vsi->back;
if (vsi->rss_hkey_user)
devm_kfree(dev, vsi->rss_hkey_user);
if (vsi->rss_lut_user)
devm_kfree(dev, vsi->rss_lut_user);
ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
struct ice_pf *pf = vsi->back;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->rss_size = 1;
cap = &pf->hw.func_caps.common_cap;
switch (vsi->type) {
vsi->rss_table_size = (u16)cap->rss_table_size;
vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
ice_vsi_type_str(vsi->type));
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
memset(&ctxt->info, 0, sizeof(ctxt->info));
ctxt->alloc_from_pool = true;
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
ctxt->info.ingress_table = cpu_to_le32(table);
ctxt->info.egress_table = cpu_to_le32(table);
ctxt->info.outer_up_table = cpu_to_le32(table);
 * ice_vsi_setup_q_map - Setup a VSI queue map
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
if (vsi->tc_cfg.numtc) {
if (!(vsi->tc_cfg.ena_tc & BIT(0)))
vsi->tc_cfg.numtc++;
vsi->tc_cfg.ena_tc |= 1;
num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
/* find the (rounded up) power-of-2 of qcount */
 * queues allocated to TC0. No:of queues is a power-of-2.
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
vsi->tc_cfg.tc_info[i].qoffset = 0;
vsi->tc_cfg.tc_info[i].qcount_rx = 1;
vsi->tc_cfg.tc_info[i].qcount_tx = 1;
vsi->tc_cfg.tc_info[i].netdev_tc = 0;
ctxt->info.tc_mapping[i] = 0;
vsi->tc_cfg.tc_info[i].qoffset = offset;
vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
/* if offset is non-zero, means it is calculated correctly based on
 * be correct and non-zero because it is based off - VSI's
vsi->num_rxq = offset;
vsi->num_rxq = num_rxq_per_tc;
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
…dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence makin…
vsi->num_txq = vsi->num_rxq;
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
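/*
 * Illustrative sketch only: the queue-map packing lines of
 * ice_vsi_setup_q_map() are not part of this excerpt. The per-TC tc_mapping
 * word generally packs a queue offset plus the log2 of a power-of-2 queue
 * count into one 16-bit field. The shifts, masks and helper name below are
 * placeholders chosen for the example, not the hardware's real layout.
 */
#define EX_TC_Q_OFFSET_S	0
#define EX_TC_Q_OFFSET_M	0x7FF
#define EX_TC_Q_NUM_S		11
#define EX_TC_Q_NUM_M		(0xF << EX_TC_Q_NUM_S)

static u16 example_build_tc_qmap(u16 qoffset, u16 qcount)
{
	/* encode the queue count as a power-of-2 exponent */
	u16 pow = (u16)order_base_2(qcount);

	return ((qoffset << EX_TC_Q_OFFSET_S) & EX_TC_Q_OFFSET_M) |
	       ((pow << EX_TC_Q_NUM_S) & EX_TC_Q_NUM_M);
}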
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
vsi->type != ICE_VSI_VF)
ctxt->info.valid_sections |= cpu_to_le16(val);
ctxt->info.fd_options = cpu_to_le16(val);
ctxt->info.max_fd_fltr_dedicated =
cpu_to_le16(vsi->num_gfltr);
ctxt->info.max_fd_fltr_shared =
cpu_to_le16(vsi->num_bfltr);
ctxt->info.fd_def_q = cpu_to_le16(val);
ctxt->info.fd_report_opt = cpu_to_le16(val);
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
pf = vsi->back;
switch (vsi->type) {
ice_vsi_type_str(vsi->type));
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
 * ice_vsi_init - Create and initialize a VSI
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
return -ENOMEM;
switch (vsi->type) {
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
ret = -ENODEV;
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
vsi->type != ICE_VSI_CTRL) {
ctxt->info.valid_sections |=
ctxt->info.sw_id = vsi->port_info->sw_id;
ctxt->info.valid_sections |=
/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
if (vsi->type == ICE_VSI_VF) {
ctxt->info.valid_sections |=
if (pf->vf[vsi->vf_id].spoofchk) {
ctxt->info.sec_flags |=
ctxt->info.sec_flags &=
if (vsi->type == ICE_VSI_PF) {
ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
ctxt->info.valid_sections |=
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
ret = -EIO;
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
ret = -EIO;
vsi->info = ctxt->info;
vsi->vsi_num = ctxt->vsi_num;
 * ice_free_res - free a block of resources
if (!res || index >= res->end)
return -EINVAL;
for (i = index; i < res->end && res->list[i] == id; i++) {
res->list[i] = 0;
 * ice_search_res - Search the tracker for a block of resources
 * Returns the base item index of the block, or -ENOMEM for error
if (needed > res->end)
return -ENOMEM;
if (res->list[end++] & ICE_RES_VALID_BIT) {
if ((start + needed) > res->end)
res->list[i++] = id;
} while (end < res->end);
return -ENOMEM;
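/*
 * Illustrative sketch only: ice_search_res() above looks for 'needed'
 * consecutive tracker entries whose ICE_RES_VALID_BIT is clear and stamps
 * the winning block with the caller's id. The loop below restates that idea
 * in simplified form; it is not the driver's exact code and the marking of
 * claimed entries is an assumption.
 */
static int example_search_res(u16 *list, u16 end, u16 needed, u16 id, u16 valid_bit)
{
	u16 start = 0;

	while (start + needed <= end) {
		u16 i;

		/* walk the candidate window; stop at the first in-use entry */
		for (i = start; i < start + needed; i++)
			if (list[i] & valid_bit)
				break;

		if (i == start + needed) {
			/* whole window is free: claim it for this id */
			for (i = start; i < start + needed; i++)
				list[i] = id | valid_bit;
			return start; /* base index of the block */
		}

		/* restart the search just past the busy entry */
		start = i + 1;
	}

	return -ENOMEM;
}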
 * ice_get_free_res_count - Get free count from a resource tracker
for (i = 0; i < res->end; i++)
if (!(res->list[i] & ICE_RES_VALID_BIT))
 * ice_get_res - get a block of resources
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
needed, res->num_entries, id);
return -EINVAL;
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
struct ice_pf *pf = vsi->back;
if (vsi->type == ICE_VSI_VF)
if (vsi->base_vector) {
dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
vsi->vsi_num, vsi->base_vector);
return -EEXIST;
num_q_vectors = vsi->num_q_vectors;
if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
struct ice_vf *vf = &pf->vf[i];
if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
if (i == pf->num_alloc_vfs)
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
ice_get_free_res_count(pf->irq_tracker),
ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
vsi->base_vector = (u16)base;
pf->num_avail_sw_msix -= num_q_vectors;
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
if (vsi->q_vectors) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
q_vector->tx.ring = NULL;
q_vector->rx.ring = NULL;
if (vsi->tx_rings) {
for (i = 0; i < vsi->alloc_txq; i++) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL);
if (vsi->rx_rings) {
for (i = 0; i < vsi->alloc_rxq; i++) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
WRITE_ONCE(vsi->rx_rings[i], NULL);
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
struct ice_pf *pf = vsi->back;
for (i = 0; i < vsi->alloc_txq; i++) {
ring->q_index = i;
ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->tx_rings[i], ring);
for (i = 0; i < vsi->alloc_rxq; i++) {
ring->q_index = i;
ring->reg_idx = vsi->rxq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
WRITE_ONCE(vsi->rx_rings[i], ring);
return -ENOMEM;
 * ice_vsi_manage_rss_lut - disable/enable RSS
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (vsi->rss_lut_user)
memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
ice_fill_rss_lut(lut, vsi->rss_table_size,
vsi->rss_size);
ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
struct ice_pf *pf = vsi->back;
vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
return -ENOMEM;
if (vsi->rss_lut_user)
memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
err = -ENOMEM;
if (vsi->rss_hkey_user)
memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
struct ice_pf *pf = vsi->back;
vsi->vsi_num);
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
vsi->vsi_num, ice_stat_str(status));
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
 * ice_pf_state_is_nominal - checks the PF for nominal state
if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
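/*
 * Illustrative sketch only: the bitmap_intersects() call above is the heart
 * of ice_pf_state_is_nominal(). The idea is to build a bitmap of "something
 * is wrong" state bits (resets, error conditions, ...) and report nominal
 * operation only when none of them are set. Names and sizes below are
 * assumptions for the example (it assumes at most 64 state bits).
 */
static bool example_state_is_nominal(const unsigned long *state,
				     unsigned int check_nbits,
				     unsigned int total_nbits)
{
	DECLARE_BITMAP(check_bits, 64) = { 0 };

	/* the low check_nbits bits are the disqualifying conditions */
	bitmap_set(check_bits, 0, check_nbits);

	/* nominal only if none of the watched bits are currently set */
	return !bitmap_intersects(state, check_bits, total_nbits);
}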
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
struct ice_hw *hw = &vsi->back->hw;
u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_bytes, &cur_es->rx_bytes);
ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_unicast, &cur_es->rx_unicast);
ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_multicast, &cur_es->rx_multicast);
ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_broadcast, &cur_es->rx_broadcast);
ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_discards, &cur_es->rx_discards);
ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_bytes, &cur_es->tx_bytes);
ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_unicast, &cur_es->tx_unicast);
ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_multicast, &cur_es->tx_multicast);
ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_broadcast, &cur_es->tx_broadcast);
ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_errors, &cur_es->tx_errors);
vsi->stat_offsets_loaded = true;
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
struct ice_pf *pf = vsi->back;
vsi->num_vlan++;
err = -ENODEV;
vsi->vsi_num);
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
struct ice_pf *pf = vsi->back;
vsi->num_vlan--;
vid, vsi->vsi_num, ice_stat_str(status));
vid, vsi->vsi_num, ice_stat_str(status));
err = -EIO;
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
vsi->rx_buf_len = ICE_RXBUF_2048;
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
vsi->rx_buf_len = ICE_RXBUF_3072;
vsi->rx_buf_len = ICE_RXBUF_2048;
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
if (q_idx >= vsi->num_rxq)
return -EINVAL;
return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
return -ENOMEM;
qg_buf->num_txqs = 1;
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
if (vsi->type == ICE_VSI_VF)
int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
return -ENOMEM;
qg_buf->num_txqs = 1;
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
for (i = 0; i < vsi->num_xdp_txq; i++)
vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
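/*
 * Illustrative sketch only: the body of ice_intrl_usec_to_reg() is not part
 * of this excerpt. Converting an interrupt rate limit given in microseconds
 * to a register value usually means dividing by the hardware granularity and
 * clamping to the register field's maximum. The granularity and maximum used
 * below are placeholders, not the device's documented limits.
 */
#define EX_INTRL_GRAN_US	2	/* assumed granularity in usecs */
#define EX_INTRL_MAX_REG	0x3F	/* assumed maximum field value */

static u8 example_intrl_usec_to_reg(u32 intrl_usec)
{
	u32 val = intrl_usec / EX_INTRL_GRAN_US;

	return (u8)min_t(u32, val, EX_INTRL_MAX_REG);
}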
 * ice_write_intrl - write throttle rate limit to interrupt specific register
struct ice_hw *hw = &q_vector->vsi->back->hw;
wr32(hw, GLINT_RATE(q_vector->reg_idx),
 * __ice_write_itr - write throttle rate to register
struct ice_hw *hw = &q_vector->vsi->back->hw;
wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
 * ice_write_itr - write throttle rate to queue specific register
if (!rc->ring)
q_vector = rc->ring->q_vector;
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
u16 reg_idx = q_vector->reg_idx;
 * For SR-IOV VF VSIs queue vector index always starts
for (q = 0; q < q_vector->num_ring_tx; q++) {
q_vector->tx.itr_idx);
for (q = 0; q < q_vector->num_ring_rx; q++) {
q_vector->rx.itr_idx);
 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
struct ice_hw *hw = &vsi->back->hw;
return -ENOMEM;
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
vsi->info.vlan_flags = ctxt->info.vlan_flags;
 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
struct ice_hw *hw = &vsi->back->hw;
if (vsi->info.pvid)
return -ENOMEM;
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
vsi->info.vlan_flags = ctxt->info.vlan_flags;
 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
 * ice_vsi_stop_tx_rings - Disable Tx rings
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
return -EINVAL;
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
return -EINVAL;
if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
pf = vsi->back;
return -ENOMEM;
ctxt->info = vsi->info;
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt->info.valid_sections =
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
…netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, …
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
ice_aq_str(pf->hw.adminq.sq_last_status));
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
return -EIO;
struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
if (!vsi || !vsi->q_vectors)
return -EINVAL;
struct ice_q_vector *q_vector = vsi->q_vectors[i];
dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
i, vsi->vsi_num);
if (vsi->type == ICE_VSI_VF) {
struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
q_vector->reg_idx =
q_vector->v_idx + vsi->base_vector;
struct ice_q_vector *q_vector = vsi->q_vectors[i];
q_vector->reg_idx = 0;
return -EINVAL;
 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
struct ice_pf *pf = vsi->back;
if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
vsi->vsi_num, ice_stat_str(status));
 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_pf *pf = vsi->back;
 * - PF aggregator node to contains VSIs of type _PF and _CTRL
 * - VF aggregator nodes will contain VF VSI
port_info = pf->hw.port_info;
switch (vsi->type) {
agg_node_iter = &pf->pf_agg_node[0];
agg_node_iter = &pf->vf_agg_node[0];
ice_vsi_type_str(vsi->type));
if (agg_node_iter->num_vsis &&
agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
if (agg_node_iter->valid &&
agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
agg_id = agg_node_iter->agg_id;
if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
if (!agg_node->valid) {
(u8)vsi->tc_cfg.ena_tc);
agg_node->valid = true;
agg_node->agg_id = agg_id;
status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
(u8)vsi->tc_cfg.ena_tc);
vsi->idx, agg_id);
agg_node->num_vsis++;
/* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
vsi->agg_node = agg_node;
vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
vsi->agg_node->num_vsis);
 * ice_vsi_setup - Set up a VSI by a given type
 * fill-in ICE_INVAL_VFID as input.
vsi->port_info = pi;
vsi->vsw = pf->first_sw;
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
vsi->vf_id = vf_id;
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx);
switch (vsi->type) {
if (vsi->type != ICE_VSI_CTRL)
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->alloc_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
vsi->vsi_num, ice_stat_str(status));
 * be dropped so that VFs cannot send LLDP packets to reconfig DCB
if (vsi->type == ICE_VSI_PF) {
if (!vsi->agg_node)
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
ice_enable_lag(pf->lag);
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
for (q = 0; q < q_vector->num_ring_tx; q++) {
ice_write_itr(&q_vector->tx, 0);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
u32 xdp_txq = txq + vsi->num_xdp_txq;
wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
for (q = 0; q < q_vector->num_ring_rx; q++) {
ice_write_itr(&q_vector->rx, 0);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
 * ice_vsi_free_irq - Free the IRQ association with the OS
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
if (!vsi->q_vectors || !vsi->irqs_ready)
if (vsi->type == ICE_VSI_VF)
vsi->irqs_ready = false;
irq_num = pf->msix_entries[vector].vector;
if (!vsi->q_vectors[i] ||
!(vsi->q_vectors[i]->num_ring_tx ||
vsi->q_vectors[i]->num_ring_rx))
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
if (!vsi->tx_rings)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
ice_free_tx_ring(vsi->tx_rings[i]);
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
if (!vsi->rx_rings)
if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
ice_free_rx_ring(vsi->rx_rings[i]);
 * ice_vsi_close - Shut down a VSI
if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
 * ice_ena_vsi - resume a VSI
if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
if (netif_running(vsi->netdev)) {
err = ice_open_internal(vsi->netdev);
} else if (vsi->type == ICE_VSI_CTRL) {
 * ice_dis_vsi - pause a VSI
if (test_bit(ICE_VSI_DOWN, vsi->state))
set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
} else if (vsi->type == ICE_VSI_CTRL) {
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
int base = vsi->base_vector;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
if (vsi->tx_rings) {
if (vsi->tx_rings[i]) {
reg = vsi->tx_rings[i]->reg_idx;
if (vsi->rx_rings) {
if (vsi->rx_rings[i]) {
reg = vsi->rx_rings[i]->reg_idx;
if (!vsi->q_vectors[i])
wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
if (vsi->type == ICE_VSI_VF)
synchronize_irq(pf->msix_entries[i + base].vector);
 * ice_napi_del - Remove NAPI handler for the VSI
if (!vsi->netdev)
netif_napi_del(&vsi->q_vectors[v_idx]->napi);
 * ice_vsi_release - Delete a VSI and free its resources
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
if (vsi->netdev && !ice_is_reset_in_progress(pf->state) &&
(test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) {
unregister_netdev(vsi->netdev);
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
if (vsi->type != ICE_VSI_LB)
/* SR-IOV determines needed MSIX resources all at once instead of per
 * many interrupts each VF needs. SR-IOV MSIX resources are also
if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
struct ice_vf *vf = &pf->vf[i];
if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
if (i == pf->num_alloc_vfs) {
ice_free_res(pf->irq_tracker, vsi->base_vector,
pf->num_avail_sw_msix += vsi->num_q_vectors;
} else if (vsi->type != ICE_VSI_VF) {
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
if (vsi->type == ICE_VSI_PF) {
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
vsi->vsi_num, err);
if (vsi->netdev) {
if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
unregister_netdev(vsi->netdev);
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) {
free_netdev(vsi->netdev);
vsi->netdev = NULL;
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
if (!ice_is_reset_in_progress(pf->state))
 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
struct ice_q_vector *q_vector = vsi->q_vectors[i];
coalesce[i].itr_tx = q_vector->tx.itr_setting;
coalesce[i].itr_rx = q_vector->rx.itr_setting;
coalesce[i].intrl = q_vector->intrl;
if (i < vsi->num_txq)
if (i < vsi->num_rxq)
return vsi->num_q_vectors;
 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
 * as there is no harm because the dynamic algorithm
if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
rc = &vsi->q_vectors[i]->rx;
rc->itr_setting = coalesce[i].itr_rx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_rxq) {
rc = &vsi->q_vectors[i]->rx;
rc->itr_setting = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
rc = &vsi->q_vectors[i]->tx;
rc->itr_setting = coalesce[i].itr_tx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_txq) {
rc = &vsi->q_vectors[i]->tx;
rc->itr_setting = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[i].intrl;
ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl);
for (; i < vsi->num_q_vectors; i++) {
rc = &vsi->q_vectors[i]->tx;
rc->itr_setting = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
rc = &vsi->q_vectors[i]->rx;
rc->itr_setting = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl);
 * ice_vsi_rebuild - Rebuild VSI after reset
return -EINVAL;
pf = vsi->back;
vtype = vsi->type;
vf = &pf->vf[vsi->vf_id];
coalesce = kcalloc(vsi->num_q_vectors,
return -ENOMEM;
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
vsi->vsi_num, ret);
/* SR-IOV determines needed MSIX resources all at once instead of per
 * many interrupts each VF needs. SR-IOV MSIX resources are also
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
vsi->base_vector = 0;
ice_vsi_set_num_qs(vsi, vf->vf_id);
vsi->num_xdp_txq = vsi->alloc_rxq;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
for (i = 0; i < vsi->tc_cfg.numtc; i++) {
max_txqs[i] = vsi->alloc_txq;
max_txqs[i] += vsi->num_xdp_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
vsi->vsi_num, ice_stat_str(status));
ret = -EIO;
if (vsi->netdev) {
vsi->current_netdev_flags = 0;
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
set_bit(ICE_RESET_FAILED, pf->state);
 * ice_is_reset_in_progress - check for a reset in progress
 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
 * Returns 0 on success, -EBUSY if the reset is not finished within the
 * timeout, and -ERESTARTSYS if the thread was interrupted.
ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
!ice_is_reset_in_progress(pf->state),
return -EBUSY;
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
vsi->info.mapping_flags = ctx->info.mapping_flags;
memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
sizeof(vsi->info.q_mapping));
memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
sizeof(vsi->info.tc_mapping));
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
struct ice_pf *pf = vsi->back;
max_txqs[i] = vsi->alloc_txq;
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
return -ENOMEM;
ctx->vf_num = 0;
ctx->info = vsi->info;
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
ret = -EIO;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
vsi->vsi_num, ice_stat_str(status));
ret = -EIO;
vsi->info.valid_sections = 0;
 * ice_update_ring_stats - Update ring statistics
ring->stats.bytes += bytes;
ring->stats.pkts += pkts;
 * ice_update_tx_ring_stats - Update Tx ring specific counters
u64_stats_update_begin(&tx_ring->syncp);
u64_stats_update_end(&tx_ring->syncp);
 * ice_update_rx_ring_stats - Update Rx ring specific counters
u64_stats_update_begin(&rx_ring->syncp);
u64_stats_update_end(&rx_ring->syncp);
 * ice_status_to_errno - convert from enum ice_status to Linux errno
return -ENOENT;
return -EIO;
return -EINVAL;
return -ENOMEM;
return -EAGAIN;
return -EBUSY;
return -ENOSPC;
return -EINVAL;
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
return (sw->dflt_vsi && sw->dflt_vsi_ena);
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
 * ice_set_dflt_vsi - set the default forwarding VSI
 * -EEXIST since there can only be one default VSI per switch.
return -EINVAL;
dev = ice_pf_to_dev(vsi->back);
vsi->vsi_num);
sw->dflt_vsi->vsi_num);
return -EEXIST;
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
vsi->vsi_num, ice_stat_str(status));
return -EIO;
sw->dflt_vsi = vsi;
sw->dflt_vsi_ena = true;
 * ice_clear_dflt_vsi - clear the default forwarding VSI
return -EINVAL;
dev = ice_pf_to_dev(sw->pf);
dflt_vsi = sw->dflt_vsi;
return -ENODEV;
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
dflt_vsi->vsi_num, ice_stat_str(status));
return -EIO;
sw->dflt_vsi = NULL;
sw->dflt_vsi_ena = false;
 * ice_set_link - turn on/off physical link
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_port_info *pi = vsi->port_info;
struct ice_hw *hw = pi->hw;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
ice_aq_str(hw->adminq.sq_last_status));
ice_aq_str(hw->adminq.sq_last_status));
return -EIO;