Lines Matching +full:tcs +full:- +full:wait
Matched lines from drivers/net/ethernet/intel/ice/ice.h; each line is prefixed with its line number in that file, and lines inside an inline function note the enclosing function.
1 /* SPDX-License-Identifier: GPL-2.0 */
19 #include <linux/dma-mapping.h>
22 #include <linux/wait.h>
96 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
97 #define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
99 #define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
110 #define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
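The defines above are all built by subtraction: the special interrupt-resource IDs are carved downward from the resource-valid bit, and ICE_MAX_MTU is the largest frame the MAC accepts minus the Ethernet header, FCS and double-VLAN overhead. A worked expansion as a sketch, assuming the base values used elsewhere in this driver (ICE_RES_VALID_BIT 0x8000, ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728, ICE_ETH_PKT_HDR_PAD = ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN):

/* Sketch only; values assume the bases named above.
 *   ICE_RES_MISC_VEC_ID    = 0x8000 - 1 = 0x7fff   (misc/OICR interrupt)
 *   ICE_RES_RDMA_VEC_ID    = 0x7fff - 1 = 0x7ffe   (RDMA auxiliary vectors)
 *   ICE_RES_VF_CTRL_VEC_ID = 0x7ffe - 1 = 0x7ffd   (shared VF control VSI IRQ)
 *   ICE_MAX_MTU            = 9728 - (14 + 4 + 2 * 4) = 9702 bytes
 */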
116 #define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
117 #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
118 #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
119 #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
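The four accessor macros above do the same job: ring->desc points at one DMA-coherent block, and the macro overlays the appropriate descriptor layout on entry i. A minimal usage sketch; ice_example_fill_tx_desc() is a hypothetical helper, not a function in the driver, and the descriptor field and bit names are the ones from ice_lan_tx_rx.h:

static void ice_example_fill_tx_desc(struct ice_ring *tx_ring, u16 i,
				     dma_addr_t dma, unsigned int size)
{
	/* overlay the data-descriptor layout on slot i of ring->desc */
	struct ice_tx_desc *tx_desc = ICE_TX_DESC(tx_ring, i);
	u64 td_cmd = ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS;

	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S));
}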
123 for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
127 for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
130 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
134 for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
137 for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
140 for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
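These for-loop bodies are the expansions of the driver's ice_for_each_vsi/txq/rxq/alloc_txq/alloc_rxq/q_vector iterator macros; only the bodies appear here because the macro names themselves contain no matched token. A short usage sketch assuming the ice_for_each_txq() wrapper around the body shown at line 127; ice_example_count_tx_rings() is hypothetical:

static void ice_example_count_tx_rings(struct ice_vsi *vsi)
{
	unsigned int active = 0;
	int i;

	/* walk the VSI's in-use Tx queue indexes (0 .. num_txq - 1) */
	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i])
			active++;

	dev_dbg(ice_pf_to_dev(vsi->back), "%u active Tx rings\n", active);
}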
159 #define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
162 u32 q_teid; /* Tx-scheduler element identifier */
177 u8 numtc; /* Total number of enabled TCs */
189 struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
286 u16 idx; /* software index in pf->vsi[] */
288 s16 vf_id; /* VF ID for SR-IOV VSIs */
303 #define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1)
334 u16 *txq_map; /* index in pf->avail_txqs */
335 u16 *rxq_map; /* index in pf->avail_rxqs */
362 u16 v_idx; /* index in the vsi->q_vector array. */
427 /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
428 * number of MSIX vectors needed for all SR-IOV VFs from the number of
433 u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
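The comment at lines 427-428 is cut off by the search; it describes deriving sriov_base_vector by subtracting the vectors needed by all VFs from the total MSI-X vectors available to this PF. A hedged sketch of that calculation, consistent with the comment rather than a verbatim copy of the driver (the helper name is hypothetical; pf->irq_tracker and the capability fields are the ones used elsewhere in ice.h):

static int ice_example_set_sriov_base_vector(struct ice_pf *pf,
					     u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	/* VF vectors are taken from the top of the PF's MSI-X space */
	sriov_base_vector = total_vectors - num_msix_needed;

	/* never hand out vectors the PF already uses for its own queues,
	 * the misc/OICR interrupt or RDMA
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;
	return 0;
}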
437 /* Virtchnl/SR-IOV config info */
462 /* spinlock to protect the AdminQ wait list */
477 u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
518 * ice_irq_dynamic_ena - Enable default interrupt generation settings
527 u32 vector = (vsi && q_vector) ? q_vector->reg_idx : in ice_irq_dynamic_ena()
528 ((struct ice_pf *)hw->back)->oicr_idx; in ice_irq_dynamic_ena()
538 if (test_bit(ICE_VSI_DOWN, vsi->state)) in ice_irq_dynamic_ena()
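Only the matched lines of ice_irq_dynamic_ena() are listed (527-528 and 538). For context, a hedged reconstruction of the whole inline helper: it targets either the queue vector's register index or the PF's other-interrupt-cause (OICR) vector, bails out if the VSI is down, and writes GLINT_DYN_CTL to clear the pending bit and re-enable the interrupt. Register and mask names are the ones the driver uses; treat the body as a sketch, not a verbatim copy:

static inline void
ice_example_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
			    struct ice_q_vector *q_vector)
{
	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
				((struct ice_pf *)hw->back)->oicr_idx;
	int itr = ICE_ITR_NONE;		/* leave the current ITR setting alone */
	u32 val;

	/* clear the PBA (pending bit array) entry while enabling so a stale
	 * cause does not fire the moment the interrupt is unmasked
	 */
	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (itr << GLINT_DYN_CTL_ITR_INDX_S);

	if (vsi && test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	wr32(hw, GLINT_DYN_CTL(vector), val);
}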
544 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
551 return np->vsi->back; in ice_netdev_to_pf()
556 return !!vsi->xdp_prog; in ice_is_xdp_ena_vsi()
561 ring->flags |= ICE_TX_FLAGS_RING_XDP; in ice_set_ring_xdp()
565 * ice_xsk_pool - get XSK buffer pool bound to a ring
573 struct ice_vsi *vsi = ring->vsi; in ice_xsk_pool()
574 u16 qid = ring->q_index; in ice_xsk_pool()
577 qid -= vsi->num_xdp_txq; in ice_xsk_pool()
579 if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) in ice_xsk_pool()
582 return xsk_get_pool_from_qid(vsi->netdev, qid); in ice_xsk_pool()
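The listing at 573-582 omits the condition that guards the qid adjustment at 577: only rings flagged with ICE_TX_FLAGS_RING_XDP (see ice_set_ring_xdp() at line 561) have their q_index offset by num_xdp_txq, because XDP Tx rings are appended after the regular Tx rings. A hedged sketch of the complete helper (hypothetical name; field and helper names as in this header):

static inline struct xsk_buff_pool *ice_example_xsk_pool(struct ice_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	/* XDP Tx rings live after the regular Tx rings; map the ring index
	 * back to the logical queue ID the AF_XDP socket was bound to
	 */
	if (ring->flags & ICE_TX_FLAGS_RING_XDP)
		qid -= vsi->num_xdp_txq;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}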
586 * ice_get_main_vsi - Get the PF VSI
589 * returns pf->vsi[0], which by definition is the PF VSI
593 if (pf->vsi) in ice_get_main_vsi()
594 return pf->vsi[0]; in ice_get_main_vsi()
600 * ice_get_ctrl_vsi - Get the control VSI
605 /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */ in ice_get_ctrl_vsi()
606 if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI) in ice_get_ctrl_vsi()
609 return pf->vsi[pf->ctrl_vsi_idx]; in ice_get_ctrl_vsi()
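Both accessors can return NULL, for example before pf->vsi is allocated or when no control VSI was ever created, so callers check the result. A small hypothetical usage sketch:

static void ice_example_dump_vsi_indexes(struct ice_pf *pf)
{
	struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
	struct ice_vsi *ctrl_vsi = ice_get_ctrl_vsi(pf);

	if (main_vsi)
		dev_dbg(ice_pf_to_dev(pf), "PF VSI at index %u\n",
			main_vsi->idx);
	if (ctrl_vsi)
		dev_dbg(ice_pf_to_dev(pf), "control VSI at index %u\n",
			ctrl_vsi->idx);
}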
613 * ice_set_sriov_cap - enable SRIOV in PF flags
618 if (pf->hw.func_caps.common_cap.sr_iov_1_1) in ice_set_sriov_cap()
619 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_sriov_cap()
623 * ice_clear_sriov_cap - disable SRIOV in PF flags
628 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_clear_sriov_cap()
691 * ice_set_rdma_cap - enable RDMA support
696 if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { in ice_set_rdma_cap()
697 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_rdma_cap()
698 set_bit(ICE_FLAG_AUX_ENA, pf->flags); in ice_set_rdma_cap()
704 * ice_clear_rdma_cap - disable RDMA support
710 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_clear_rdma_cap()
711 clear_bit(ICE_FLAG_AUX_ENA, pf->flags); in ice_clear_rdma_cap()
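Like the SR-IOV pair at lines 613-628, these helpers translate discovered hardware capabilities into PF feature flags once; the rest of the driver then keys off the flag bits instead of re-reading the capability structures. A hypothetical call-site sketch:

static void ice_example_apply_caps(struct ice_pf *pf)
{
	/* record what the hardware reported as PF feature flags */
	ice_set_sriov_cap(pf);
	ice_set_rdma_cap(pf);

	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		dev_dbg(ice_pf_to_dev(pf),
			"RDMA capable and MSI-X reserved; auxiliary device can be created\n");
}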