Lines matching refs: tx_ring
(one hit per source line; the trailing "in func() argument/local" notes the enclosing function and whether tx_ring is a parameter or a local variable there)
25 struct enetc_bdr *tx_ring) in enetc_rx_ring_from_xdp_tx_ring() argument
27 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; in enetc_rx_ring_from_xdp_tx_ring()
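The two hits above (enetc_rx_ring_from_xdp_tx_ring) recover the RX ring paired with an XDP TX ring by pointer arithmetic alone: priv->xdp_tx_ring points into the tail of the priv->tx_ring[] pointer array (see the enetc_alloc_msix hit near the end of this listing), so the slot distance is exactly the RX ring index. A minimal user-space model of that arithmetic, with a made-up array size and offset (illustration only, not driver code):

    #include <stdio.h>

    struct bdr { int index; };

    int main(void)
    {
            struct bdr rings[8], *tx_ring[8], **xdp_tx_ring;
            int first_xdp_tx_ring = 6;      /* assumed: last two rings serve XDP */
            int i;

            for (i = 0; i < 8; i++) {
                    rings[i].index = i;
                    tx_ring[i] = &rings[i];
            }
            xdp_tx_ring = &tx_ring[first_xdp_tx_ring];

            /* same expression as in enetc_rx_ring_from_xdp_tx_ring():
             * slot distance within the array == paired RX ring index */
            struct bdr *xdp_ring = xdp_tx_ring[1];
            int rx_index = &tx_ring[xdp_ring->index] - xdp_tx_ring;

            printf("rx ring index = %d\n", rx_index);   /* prints 1 */
            return 0;
    }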
49 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, in enetc_unmap_tx_buff() argument
57 dma_unmap_page(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
61 dma_unmap_single(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
66 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, in enetc_free_tx_frame() argument
73 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_free_tx_frame()
85 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) in enetc_update_tx_ring_tail() argument
88 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); in enetc_update_tx_ring_tail()
124 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) in enetc_map_tx_buffs() argument
127 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_map_tx_buffs()
141 i = tx_ring->next_to_use; in enetc_map_tx_buffs()
142 txbd = ENETC_TXBD(*tx_ring, i); in enetc_map_tx_buffs()
145 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); in enetc_map_tx_buffs()
146 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in enetc_map_tx_buffs()
153 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
178 if (tx_ring->tsd_enable) in enetc_map_tx_buffs()
199 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
201 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
202 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
257 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, in enetc_map_tx_buffs()
259 if (dma_mapping_error(tx_ring->dev, dma)) in enetc_map_tx_buffs()
269 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
271 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
272 txbd = ENETC_TXBD(*tx_ring, 0); in enetc_map_tx_buffs()
291 tx_ring->tx_swbd[i].is_eof = true; in enetc_map_tx_buffs()
292 tx_ring->tx_swbd[i].skb = skb; in enetc_map_tx_buffs()
294 enetc_bdr_idx_inc(tx_ring, &i); in enetc_map_tx_buffs()
295 tx_ring->next_to_use = i; in enetc_map_tx_buffs()
299 enetc_update_tx_ring_tail(tx_ring); in enetc_map_tx_buffs()
304 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_buffs()
307 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
308 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_map_tx_buffs()
310 i = tx_ring->bd_count; in enetc_map_tx_buffs()
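Taken together, the enetc_map_tx_buffs() hits above outline a classic map-then-unwind TX path: DMA-map the linear part of the skb, map each page fragment into its own BD, wrap the ring index when it reaches bd_count, flag the last software BD with is_eof and stash the skb there, then advance next_to_use and ring the doorbell; if any mapping fails, everything mapped so far is released. The condensed sketch below keeps only that skeleton and uses the helpers visible in this listing; the per-BD field fills, offload/TSO handling and the exact unwind order are simplified assumptions rather than the driver's full logic.

    static int sketch_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
    {
            int i = tx_ring->next_to_use, count = 0, f;
            dma_addr_t dma;

            /* linear (head) part of the skb */
            dma = dma_map_single(tx_ring->dev, skb->data,
                                 skb_headlen(skb), DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
                    goto dma_err;
            /* ... fill ENETC_TXBD(*tx_ring, i) and tx_ring->tx_swbd[i] ... */
            count++;

            /* one BD per page fragment */
            for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                    skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                    enetc_bdr_idx_inc(tx_ring, &i);         /* wraps at bd_count */

                    dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                    if (dma_mapping_error(tx_ring->dev, dma))
                            goto dma_err;
                    /* ... fill the fragment's BD and software BD ... */
                    count++;
            }

            tx_ring->tx_swbd[i].is_eof = true;      /* last BD closes the frame */
            tx_ring->tx_swbd[i].skb = skb;          /* consumed at clean time */

            enetc_bdr_idx_inc(tx_ring, &i);
            tx_ring->next_to_use = i;
            enetc_update_tx_ring_tail(tx_ring);     /* doorbell: write tpir */
            return count;

    dma_err:
            dev_err(tx_ring->dev, "DMA map error");
            while (count--) {                       /* release what was mapped */
                    if (i == 0)
                            i = tx_ring->bd_count;
                    i--;
                    enetc_free_tx_frame(tx_ring, &tx_ring->tx_swbd[i]);
            }
            return 0;
    }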
321 struct enetc_bdr *tx_ring; in enetc_start_xmit() local
333 tx_ring = priv->tx_ring[skb->queue_mapping]; in enetc_start_xmit()
340 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { in enetc_start_xmit()
341 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
346 count = enetc_map_tx_buffs(tx_ring, skb); in enetc_start_xmit()
352 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) in enetc_start_xmit()
353 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
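The enetc_start_xmit() hits show the usual stop/reserve backpressure handshake with the networking stack: stop the subqueue and return busy if the ring cannot hold a worst-case frame, and stop it again after queueing whenever fewer than ENETC_TXBDS_MAX_NEEDED descriptors remain (the matching wake is in enetc_clean_tx_ring further down). A hedged sketch of just that flow; the BD-count estimate and the drop path are simplified assumptions:

    static netdev_tx_t sketch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            struct enetc_ndev_priv *priv = netdev_priv(ndev);
            struct enetc_bdr *tx_ring = priv->tx_ring[skb->queue_mapping];
            int count = skb_shinfo(skb)->nr_frags + 1;      /* head + frags (simplified) */

            if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
                    netif_stop_subqueue(ndev, tx_ring->index);
                    return NETDEV_TX_BUSY;          /* stack retries once woken */
            }

            count = enetc_map_tx_buffs(tx_ring, skb);
            if (unlikely(!count))
                    goto drop;

            /* leave headroom for a maximally fragmented follow-up frame */
            if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
                    netif_stop_subqueue(ndev, tx_ring->index);

            return NETDEV_TX_OK;

    drop:
            dev_kfree_skb_any(skb);
            return NETDEV_TX_OK;
    }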
436 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) in enetc_bd_ready_count() argument
438 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; in enetc_bd_ready_count()
440 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; in enetc_bd_ready_count()
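enetc_bd_ready_count() computes how many descriptors the hardware has consumed since the driver last cleaned, as a wrap-aware distance between the consumer index read from tcir and the driver's own position. A tiny stand-alone model of that modular arithmetic (the values are made up):

    #include <stdio.h>

    /* pi: hardware consumer index (from the TBCIR register),
     * ci: driver's next_to_clean, bd_count: ring size */
    static int bd_ready_count(int bd_count, int pi, int ci)
    {
            return pi >= ci ? pi - ci : bd_count - ci + pi;
    }

    int main(void)
    {
            printf("%d\n", bd_ready_count(256, 10, 4));     /* no wrap: 6  */
            printf("%d\n", bd_ready_count(256, 3, 250));    /* wrapped: 9  */
            return 0;
    }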
487 static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, in enetc_recycle_xdp_tx_buff() argument
490 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_recycle_xdp_tx_buff()
500 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); in enetc_recycle_xdp_tx_buff()
526 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) in enetc_clean_tx_ring() argument
528 struct net_device *ndev = tx_ring->ndev; in enetc_clean_tx_ring()
536 i = tx_ring->next_to_clean; in enetc_clean_tx_ring()
537 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_clean_tx_ring()
539 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
552 txbd = ENETC_TXBD(*tx_ring, i); in enetc_clean_tx_ring()
563 enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd); in enetc_clean_tx_ring()
565 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_clean_tx_ring()
593 if (unlikely(i == tx_ring->bd_count)) { in enetc_clean_tx_ring()
595 tx_swbd = tx_ring->tx_swbd; in enetc_clean_tx_ring()
602 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | in enetc_clean_tx_ring()
603 BIT(16 + tx_ring->index)); in enetc_clean_tx_ring()
607 bds_to_clean = enetc_bd_ready_count(tx_ring, i); in enetc_clean_tx_ring()
610 tx_ring->next_to_clean = i; in enetc_clean_tx_ring()
611 tx_ring->stats.packets += tx_frm_cnt; in enetc_clean_tx_ring()
612 tx_ring->stats.bytes += tx_byte_cnt; in enetc_clean_tx_ring()
615 __netif_subqueue_stopped(ndev, tx_ring->index) && in enetc_clean_tx_ring()
616 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { in enetc_clean_tx_ring()
617 netif_wake_subqueue(ndev, tx_ring->index); in enetc_clean_tx_ring()
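The enetc_clean_tx_ring() hits sketch the completion side: starting at next_to_clean, walk as many BDs as enetc_bd_ready_count() reports, recycle XDP buffers or DMA-unmap skb buffers, wrap at bd_count, account the frames, and wake the subqueue once enough descriptors are free again. The compressed sketch below follows those hits; tx_swbd->is_xdp_tx, ENETC_DEFAULT_TX_WORK, the timestamp handling and the interrupt re-arm write to idr are assumptions or omissions on my part, not verbatim driver code.

    static bool sketch_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
    {
            struct net_device *ndev = tx_ring->ndev;
            int i = tx_ring->next_to_clean;
            struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
            int tx_frm_cnt = 0, bds_to_clean;

            bds_to_clean = enetc_bd_ready_count(tx_ring, i);

            while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
                    if (tx_swbd->is_xdp_tx)         /* give the page back to RX */
                            enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
                    else if (likely(tx_swbd->dma))
                            enetc_unmap_tx_buff(tx_ring, tx_swbd);

                    if (tx_swbd->skb) {             /* last BD of an skb frame */
                            napi_consume_skb(tx_swbd->skb, napi_budget);
                            tx_swbd->skb = NULL;
                            tx_frm_cnt++;
                    }

                    tx_swbd++;
                    if (unlikely(++i == tx_ring->bd_count)) {       /* wrap */
                            i = 0;
                            tx_swbd = tx_ring->tx_swbd;
                    }

                    if (!--bds_to_clean)            /* re-check hardware progress */
                            bds_to_clean = enetc_bd_ready_count(tx_ring, i);
            }

            tx_ring->next_to_clean = i;
            tx_ring->stats.packets += tx_frm_cnt;

            if (unlikely(tx_frm_cnt && __netif_subqueue_stopped(ndev, tx_ring->index) &&
                         enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))
                    netif_wake_subqueue(ndev, tx_ring->index);

            /* false means there is more work and NAPI should poll again */
            return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
    }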
957 static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, in enetc_xdp_map_tx_buff() argument
961 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_xdp_map_tx_buff()
970 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); in enetc_xdp_map_tx_buff()
976 static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, in enetc_xdp_tx() argument
982 if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd))) in enetc_xdp_tx()
990 i = tx_ring->next_to_use; in enetc_xdp_tx()
995 enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); in enetc_xdp_tx()
999 union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i); in enetc_xdp_tx()
1004 enetc_bdr_idx_inc(tx_ring, &i); in enetc_xdp_tx()
1007 tx_ring->next_to_use = i; in enetc_xdp_tx()
1012 static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, in enetc_xdp_frame_to_xdp_tx_swbd() argument
1025 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1026 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1027 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1048 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1049 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1052 enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); in enetc_xdp_frame_to_xdp_tx_swbd()
1054 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1080 struct enetc_bdr *tx_ring; in enetc_xdp_xmit() local
1086 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; in enetc_xdp_xmit()
1088 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); in enetc_xdp_xmit()
1091 xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, in enetc_xdp_xmit()
1097 if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, in enetc_xdp_xmit()
1100 enetc_unmap_tx_buff(tx_ring, in enetc_xdp_xmit()
1102 tx_ring->stats.xdp_tx_drops++; in enetc_xdp_xmit()
1110 enetc_update_tx_ring_tail(tx_ring); in enetc_xdp_xmit()
1112 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; in enetc_xdp_xmit()
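enetc_xdp_xmit() (the ndo_xdp_xmit hook) batches redirected frames onto the per-CPU XDP TX ring selected by smp_processor_id(): each xdp_frame is converted to software BDs, enqueued with enetc_xdp_tx(), and the tail register is written once at the end. The sketch below follows the hits above; the scratch-array size, the conversion function's exact return convention and the flags handling are assumptions:

    static int sketch_xdp_xmit(struct net_device *ndev, int num_frames,
                               struct xdp_frame **frames, u32 flags)
    {
            struct enetc_ndev_priv *priv = netdev_priv(ndev);
            struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS]; /* assumed size */
            struct enetc_bdr *tx_ring = priv->xdp_tx_ring[smp_processor_id()];
            int xdp_tx_frm_cnt = 0, k, n;

            prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));

            for (k = 0; k < num_frames; k++) {
                    /* split the frame into software BDs, DMA-mapping each part */
                    n = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, xdp_redirect_arr,
                                                       frames[k]);
                    if (unlikely(n < 0))
                            break;                          /* mapping failed */

                    if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, n))) {
                            while (n--)                     /* ring full: undo mappings */
                                    enetc_unmap_tx_buff(tx_ring, &xdp_redirect_arr[n]);
                            tx_ring->stats.xdp_tx_drops++;
                            break;
                    }
                    xdp_tx_frm_cnt++;
            }

            if (likely(xdp_tx_frm_cnt))
                    enetc_update_tx_ring_tail(tx_ring);     /* single doorbell write */

            tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
            return xdp_tx_frm_cnt;
    }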
1248 struct enetc_bdr *tx_ring; in enetc_clean_rx_ring_xdp() local
1309 tx_ring = priv->xdp_tx_ring[rx_ring->index]; in enetc_clean_rx_ring_xdp()
1314 if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { in enetc_clean_rx_ring_xdp()
1316 tx_ring->stats.xdp_tx_drops++; in enetc_clean_rx_ring_xdp()
1318 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1379 enetc_update_tx_ring_tail(tx_ring); in enetc_clean_rx_ring_xdp()
1401 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) in enetc_poll()
1528 err = enetc_alloc_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
1538 enetc_free_txbdr(priv->tx_ring[i]); in enetc_alloc_tx_resources()
1548 enetc_free_txbdr(priv->tx_ring[i]); in enetc_free_tx_resources()
1619 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) in enetc_free_tx_ring() argument
1623 if (!tx_ring->tx_swbd) in enetc_free_tx_ring()
1626 for (i = 0; i < tx_ring->bd_count; i++) { in enetc_free_tx_ring()
1627 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; in enetc_free_tx_ring()
1629 enetc_free_tx_frame(tx_ring, tx_swbd); in enetc_free_tx_ring()
1632 tx_ring->next_to_clean = 0; in enetc_free_tx_ring()
1633 tx_ring->next_to_use = 0; in enetc_free_tx_ring()
1668 enetc_free_tx_ring(priv->tx_ring[i]); in enetc_free_rxtx_rings()
1749 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_setup_txbdr() argument
1751 int idx = tx_ring->index; in enetc_setup_txbdr()
1755 lower_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
1758 upper_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
1760 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_txbdr()
1762 ENETC_RTBLENR_LEN(tx_ring->bd_count)); in enetc_setup_txbdr()
1765 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); in enetc_setup_txbdr()
1766 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); in enetc_setup_txbdr()
1772 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in enetc_setup_txbdr()
1778 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); in enetc_setup_txbdr()
1779 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); in enetc_setup_txbdr()
1780 tx_ring->idr = hw->reg + ENETC_SITXIDR; in enetc_setup_txbdr()
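enetc_setup_txbdr() programs the ring's base address, length and initial indices, then caches the producer/consumer/interrupt-detect register addresses (tpir, tcir, idr) in the ring struct; the hot paths earlier in this listing (enetc_update_tx_ring_tail, enetc_bd_ready_count, the interrupt ack in enetc_clean_tx_ring) can then issue one MMIO access through a precomputed address instead of re-deriving the per-ring offset each time. Two hedged one-liners showing how those cached pointers are used (equivalents of code already in this listing, not new driver API):

    static inline void sketch_kick_tx(struct enetc_bdr *tx_ring)
    {
            /* producer-index (tail) write, as in enetc_update_tx_ring_tail() */
            enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
    }

    static inline int sketch_hw_consumer(struct enetc_bdr *tx_ring)
    {
            /* consumer-index read, as in enetc_bd_ready_count() */
            return enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
    }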
1832 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); in enetc_setup_bdrs()
1846 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) in enetc_clear_txbdr() argument
1849 int idx = tx_ring->index; in enetc_clear_txbdr()
1862 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", in enetc_clear_txbdr()
1871 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); in enetc_clear_bdrs()
1906 int idx = v->tx_ring[j].index; in enetc_setup_irqs()
2149 struct enetc_bdr *tx_ring; in enetc_setup_tc_mqprio() local
2164 tx_ring = priv->tx_ring[i]; in enetc_setup_tc_mqprio()
2165 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0); in enetc_setup_tc_mqprio()
2183 tx_ring = priv->tx_ring[i]; in enetc_setup_tc_mqprio()
2184 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i); in enetc_setup_tc_mqprio()
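The enetc_setup_tc_mqprio() hits show a one-ring-per-traffic-class mapping: when mqprio is installed, TX ring i is given hardware priority i; when it is removed, every ring is reset to priority 0. A hedged sketch of that mapping (the function name and priv->num_tx_rings are assumptions; the enetc_set_bdr_prio() calls mirror the lines above):

    static void sketch_apply_mqprio(struct enetc_ndev_priv *priv, u8 num_tc)
    {
            struct enetc_hw *hw = &priv->si->hw;
            int i;

            if (!num_tc) {
                    /* mqprio removed: all rings back to the default priority */
                    for (i = 0; i < priv->num_tx_rings; i++)
                            enetc_set_bdr_prio(hw, priv->tx_ring[i]->index, 0);
                    return;
            }

            /* one TX ring per traffic class, priority equal to the TC number */
            for (i = 0; i < num_tc; i++)
                    enetc_set_bdr_prio(hw, priv->tx_ring[i]->index, i);
    }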
2284 packets += priv->tx_ring[i]->stats.packets; in enetc_get_stats()
2285 bytes += priv->tx_ring[i]->stats.bytes; in enetc_get_stats()
2482 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); in enetc_alloc_msix()
2528 bdr = &v->tx_ring[j]; in enetc_alloc_msix()
2533 priv->tx_ring[idx] = bdr; in enetc_alloc_msix()
2538 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; in enetc_alloc_msix()
2577 priv->tx_ring[i] = NULL; in enetc_free_msix()