Lines matching refs: tx_swbd (TX software buffer descriptor handling in the enetc Ethernet driver)
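
The matches below all revolve around one structure: the software buffer descriptor that shadows each hardware TX buffer descriptor, holding the bookkeeping (DMA mapping, owning skb or xdp_frame, flags) that the hardware BD has no room for. Here is a sketch of that structure as the matched fields imply it; the skb/xdp_frame union, the exact field types and the ring type name enetc_bdr are reconstructions, not the driver's verbatim definition:

struct enetc_tx_swbd {
        union {                         /* a slot owns either a kernel skb... */
                struct sk_buff *skb;
                struct xdp_frame *xdp_frame;    /* ...or a redirected XDP frame */
        };
        dma_addr_t dma;                 /* address programmed into the hardware BD */
        struct page *page;              /* backing page, for XDP_TX buffers */
        u16 page_offset;
        u16 len;
        enum dma_data_direction dir;
        u8 is_dma_page:1;               /* dma_unmap_page() vs. dma_unmap_single() */
        u8 check_wb:1;                  /* hardware will write this BD back */
        u8 do_twostep_tstamp:1;         /* two-step PTP timestamp requested */
        u8 is_eof:1;                    /* last BD of the frame */
        u8 is_xdp_tx:1;                 /* page recycled from the RX ring */
        u8 is_xdp_redirect:1;           /* frame arrived via XDP_REDIRECT */
};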

in enetc_tx_swbd_get_skb():

static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
{
        if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
                return NULL;

        return tx_swbd->skb;
}

in enetc_tx_swbd_get_xdp_frame():

static struct xdp_frame *
enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
{
        if (tx_swbd->is_xdp_redirect)
                return tx_swbd->xdp_frame;

        return NULL;
}

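These two accessors exist because skb and xdp_frame share the same slot; the flag bits, not the pointers, say which member (if either) is live. A small illustrative helper, not part of the driver, spelling out the three slot kinds the flags encode:

/* Illustrative only: classify a slot by its flag bits. */
static const char *example_slot_kind(const struct enetc_tx_swbd *tx_swbd)
{
        if (tx_swbd->is_xdp_redirect)
                return "xdp_frame (XDP_REDIRECT / ndo_xdp_xmit)";
        if (tx_swbd->is_xdp_tx)
                return "recycled RX page (XDP_TX, no skb and no xdp_frame)";
        return "skb from the network stack";
}
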
in enetc_unmap_tx_buff():

static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
                                struct enetc_tx_swbd *tx_swbd)
{
        /* page-backed buffers (frags, XDP) vs. the linearly mapped head;
         * XDP_TX buffers were mapped as whole RX pages, so the unmap
         * length must be PAGE_SIZE rather than the frame length
         */
        if (tx_swbd->is_dma_page)
                dma_unmap_page(tx_ring->dev, tx_swbd->dma,
                               tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
                               tx_swbd->dir);
        else
                dma_unmap_single(tx_ring->dev, tx_swbd->dma,
                                 tx_swbd->len, tx_swbd->dir);

        tx_swbd->dma = 0;       /* mark the slot as unmapped */
}

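The PAGE_SIZE-vs-len ternary above only makes sense against the RX side that created the mapping: an XDP_TX buffer is an RX page that was mapped once, in full, when it entered the RX ring, so the unmap must cover the whole page rather than the frame that happened to land in it. A sketch of that RX-side counterpart (the helper name is made up, and DMA_BIDIRECTIONAL is an assumption inferred from the dir field being carried around; dma_map_page() is the real API):

/* Sketch: map a fresh RX page that may later be flipped to XDP_TX.
 * Bidirectional, because the device first writes the received frame
 * and may later read it back when XDP_TX retransmits it.
 */
static dma_addr_t example_map_rx_page(struct device *dev, struct page *page)
{
        return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
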
in enetc_free_tx_frame():

static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
                                struct enetc_tx_swbd *tx_swbd)
{
        struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
        struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);

        /* unmap first, then release whichever object the slot carries */
        if (tx_swbd->dma)
                enetc_unmap_tx_buff(tx_ring, tx_swbd);

        if (xdp_frame) {
                xdp_return_frame(tx_swbd->xdp_frame);
                tx_swbd->xdp_frame = NULL;
        } else if (skb) {
                dev_kfree_skb_any(skb);
                tx_swbd->skb = NULL;
        }
}

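Because every branch tolerates an empty field (dma may be 0, both pointers may be NULL), enetc_free_tx_frame() is safe to call on a slot in any state, which is exactly how the teardown paths at the bottom of this listing use it. A minimal usage sketch (the helper name is hypothetical; the loop mirrors enetc_free_txbdr() and enetc_free_tx_ring() below):

/* Sketch: drain every slot of a ring, whatever each one holds. */
static void example_drain_tx_ring(struct enetc_bdr *tx_ring)
{
        int i;

        for (i = 0; i < tx_ring->bd_count; i++)
                enetc_free_tx_frame(tx_ring, &tx_ring->tx_swbd[i]);
}
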
in enetc_map_tx_buffs():

struct enetc_tx_swbd *tx_swbd;          /* local */
...
/* first BD: the linear part of the skb, mapped as a single buffer */
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 0;
tx_swbd->dir = DMA_TO_DEVICE;
...
/* two-step PTP timestamping requires a BD writeback from hardware */
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
...
tx_swbd++;
...
tx_swbd = tx_ring->tx_swbd;             /* wrapped past the end of the ring */
...
tx_swbd++;
...
tx_swbd = tx_ring->tx_swbd;             /* wrapped past the end of the ring */
...
/* one BD per skb fragment: page-backed, hence is_dma_page */
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 1;
tx_swbd->dir = DMA_TO_DEVICE;
...
tx_ring->tx_swbd[i].is_eof = true;      /* last BD of the frame... */
tx_ring->tx_swbd[i].skb = skb;          /* ...is the one that owns the skb */
...
/* DMA error unwind: free whatever was mapped so far */
tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_frame(tx_ring, tx_swbd);

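Each tx_swbd++ in the matches is paired with a tx_swbd = tx_ring->tx_swbd reset: the standard ring-wrap idiom, advancing an index and a pointer together and snapping both back to slot zero at the end of the ring. A minimal sketch of that idiom (bd_count is taken from the allocation at the bottom of this listing; the helper itself is hypothetical):

/* Sketch: advance to the next BD slot, wrapping at ring end. */
static struct enetc_tx_swbd *example_next_slot(struct enetc_bdr *tx_ring,
                                               int *i,
                                               struct enetc_tx_swbd *tx_swbd)
{
        if (unlikely(++(*i) == tx_ring->bd_count)) {
                *i = 0;
                return tx_ring->tx_swbd;        /* back to slot 0 */
        }

        return tx_swbd + 1;
}
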
in enetc_recycle_xdp_tx_buff():

static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
                                      struct enetc_tx_swbd *tx_swbd)
{
        /* rebuild an RX software BD from the TX one: the page goes back
         * to the RX ring instead of being unmapped and freed
         */
        struct enetc_rx_swbd rx_swbd = {
                .dma = tx_swbd->dma,
                .page = tx_swbd->page,
                .page_offset = tx_swbd->page_offset,
                .dir = tx_swbd->dir,
                .len = tx_swbd->len,
        };
...

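Recycling only works because the DMA mapping survives the whole round trip; before the page can receive traffic again it needs a sync back to the device, not a fresh mapping. A sketch of that hand-back step under assumed field names (dma_sync_single_range_for_device() is the real kernel API; rx_ring->dev and the buffer size parameter are assumptions):

/* Sketch: make a recycled buffer device-writable again. */
static void example_sync_recycled_buff(struct enetc_bdr *rx_ring,
                                       struct enetc_rx_swbd *rx_swbd,
                                       size_t buf_size)
{
        dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
                                         rx_swbd->page_offset, buf_size,
                                         rx_swbd->dir);
}
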
in enetc_clean_tx_ring():

struct enetc_tx_swbd *tx_swbd;          /* local */
...
tx_swbd = &tx_ring->tx_swbd[i];
...
struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
bool is_eof = tx_swbd->is_eof;

/* a BD writeback is pending (two-step timestamp requested) */
if (unlikely(tx_swbd->check_wb)) {
...
        tx_swbd->do_twostep_tstamp) {
...
/* XDP_TX pages are recycled to the RX ring; everything else is unmapped */
if (tx_swbd->is_xdp_tx)
        enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
else if (likely(tx_swbd->dma))
        enetc_unmap_tx_buff(tx_ring, tx_swbd);
...
if (unlikely(tx_swbd->skb->cb[0] &
...
tx_byte_cnt += tx_swbd->len;
...
memset(tx_swbd, 0, sizeof(*tx_swbd));   /* the slot is clean and reusable */
...
tx_swbd++;
...
tx_swbd = tx_ring->tx_swbd;             /* wrapped past the end of the ring */

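Taken together, these matches outline the completion walk: start at the ring's clean index, dispose of each finished buffer, count its bytes, zero the slot, and wrap. A condensed sketch of that skeleton (next_to_clean, the done count and the NAPI budget are assumptions about the surrounding bookkeeping; timestamping, the skb->cb checks and netdev accounting are omitted; napi_consume_skb() and xdp_return_frame() are the real APIs):

/* Condensed sketch of the completion walk, not the driver's code. */
static int example_clean_tx_ring(struct enetc_bdr *tx_ring, int done, int budget)
{
        int i = tx_ring->next_to_clean;
        struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
        int tx_byte_cnt = 0;

        while (done--) {
                struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
                struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);

                if (tx_swbd->is_xdp_tx)
                        enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
                else if (tx_swbd->dma)
                        enetc_unmap_tx_buff(tx_ring, tx_swbd);

                /* only the EOF slot carries a non-NULL skb or xdp_frame */
                if (xdp_frame)
                        xdp_return_frame(xdp_frame);
                else if (skb)
                        napi_consume_skb(skb, budget);

                tx_byte_cnt += tx_swbd->len;
                memset(tx_swbd, 0, sizeof(*tx_swbd));

                if (unlikely(++i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = tx_ring->tx_swbd;
                } else {
                        tx_swbd++;
                }
        }

        tx_ring->next_to_clean = i;
        return tx_byte_cnt;
}
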
in enetc_xdp_map_tx_buff():

...
                          struct enetc_tx_swbd *tx_swbd,        /* argument */
...
/* program the hardware BD from the software one... */
txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
txbd->buf_len = cpu_to_le16(tx_swbd->len);
...
/* ...then stash a copy at the same index in the shadow array, so the
 * cleanup path can find the page to recycle
 */
memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));

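The memcpy is the glue between the two parallel rings: the hardware BD gets the DMA address and length, while the software copy lands at the same index i so that enetc_clean_tx_ring() can later recycle the page. A sketch of a caller flushing an array of stack-built descriptors (the trailing frame-length parameter is an assumption suggested by the dangling comma in the match above; next_to_use is an assumed ring field):

/* Sketch: push n stack-built software BDs into the TX ring. */
static void example_xdp_tx_flush(struct enetc_bdr *tx_ring,
                                 struct enetc_tx_swbd *xdp_tx_arr,
                                 int n, int frm_len)
{
        int i = tx_ring->next_to_use;
        int k;

        for (k = 0; k < n; k++) {
                enetc_xdp_map_tx_buff(tx_ring, i, &xdp_tx_arr[k], frm_len);
                if (unlikely(++i == tx_ring->bd_count))
                        i = 0;
        }

        tx_ring->next_to_use = i;
}
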
in enetc_rx_swbd_to_xdp_tx_swbd():

struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];         /* local */
...
/* XDP_TX: hand the RX page to the TX ring as-is; the buffer is already
 * DMA-mapped, so only the software bookkeeping needs to be copied
 */
tx_swbd->dma = rx_swbd->dma;
tx_swbd->dir = rx_swbd->dir;
tx_swbd->page = rx_swbd->page;
tx_swbd->page_offset = rx_swbd->page_offset;
tx_swbd->len = rx_swbd->len;
tx_swbd->is_dma_page = true;
tx_swbd->is_xdp_tx = true;      /* tells the cleanup path to recycle, not free */
tx_swbd->is_eof = false;

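Read together with enetc_recycle_xdp_tx_buff() above, this is a lossless round trip: dma, page, page_offset, dir and len travel RX-to-TX here and TX-back-to-RX on completion, so the page never leaves the DMA-mapped pool. A hypothetical debug-build check for that invariant (not driver code):

/* Hypothetical: a recycled buffer must describe the same mapping
 * it left the RX ring with.
 */
static bool example_round_trip_intact(const struct enetc_rx_swbd *before,
                                      const struct enetc_rx_swbd *after)
{
        return before->dma == after->dma &&
               before->page == after->page &&
               before->page_offset == after->page_offset &&
               before->dir == after->dir;
}
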
in enetc_alloc_txbdr():

txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
if (!txr->tx_swbd)
...
vfree(txr->tx_swbd);    /* error-path unwind */

in enetc_free_txbdr():

/* drain every slot before freeing the array itself */
enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
...
vfree(txr->tx_swbd);
txr->tx_swbd = NULL;

in enetc_free_tx_ring():

if (!tx_ring->tx_swbd)
        return;
...
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];   /* local */

enetc_free_tx_frame(tx_ring, tx_swbd);
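
The allocation and the teardown sites above bracket the descriptor array's lifetime: vzalloc() at ring setup, enetc_free_tx_frame() per slot while buffers may still be in flight, and vfree() only after every slot has been drained. A condensed sketch of that pairing (error handling trimmed; vzalloc() and vfree() are the real APIs):

/* Sketch: the tx_swbd array's lifecycle, alloc to free. */
static int example_txbdr_lifecycle(struct enetc_bdr *txr)
{
        int i;

        txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
        if (!txr->tx_swbd)
                return -ENOMEM;

        /* ... ring in use ... */

        for (i = 0; i < txr->bd_count; i++)
                enetc_free_tx_frame(txr, &txr->tx_swbd[i]);     /* drain slots */

        vfree(txr->tx_swbd);
        txr->tx_swbd = NULL;    /* lets enetc_free_tx_ring() bail out early */

        return 0;
}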