Lines matching refs: rx_swbd
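
All hits below are in the enetc Rx datapath and operate on one per-buffer software descriptor. A minimal sketch of the layout these fragments imply, reconstructed only from the fields visible in this listing (types, the macro value and the comments are assumptions; the real driver definitions carry more state):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#define ENETC_RXB_TRUESIZE	2048	/* half of a 4 KiB page; value assumed */

struct enetc_rx_swbd {
	dma_addr_t dma;			/* DMA address of the mapped page */
	struct page *page;		/* backing page; NULL means the slot is empty */
	u16 page_offset;		/* which half of the page the hardware owns */
	enum dma_data_direction dir;	/* DMA_FROM_DEVICE, or DMA_BIDIRECTIONAL with XDP */
	u16 len;			/* received length, used on the XDP paths */
};

struct enetc_bdr {
	struct device *dev;		/* device used for every DMA API call below */
	struct enetc_rx_swbd *rx_swbd;	/* software ring, bd_count entries */
	int bd_count;
	int next_to_use;
	int next_to_clean;
	int next_to_alloc;		/* next slot allowed to pick up a recycled page */
	int buffer_offset;		/* headroom reserved in front of each buffer */
};
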
453 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; in enetc_reuse_page()
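
Line 453 is the heart of page recycling: a still-live buffer is copied into the slot at next_to_alloc so the refill path can pick it up without a fresh allocation. A sketch of the surrounding function, assuming the layout above; the index increment is paraphrased:

static void enetc_reuse_page(struct enetc_bdr *rx_ring,
			     struct enetc_rx_swbd *old)
{
	struct enetc_rx_swbd *new;

	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

	/* advance next_to_alloc with wraparound */
	if (unlikely(++rx_ring->next_to_alloc == rx_ring->bd_count))
		rx_ring->next_to_alloc = 0;

	/* copy dma/page/page_offset/dir into the slot about to be refilled */
	*new = *old;
}
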
491 struct enetc_rx_swbd rx_swbd = { in enetc_recycle_xdp_tx_buff() local
503 enetc_reuse_page(rx_ring, &rx_swbd); in enetc_recycle_xdp_tx_buff()
506 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, in enetc_recycle_xdp_tx_buff()
507 rx_swbd.page_offset, in enetc_recycle_xdp_tx_buff()
509 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
518 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, in enetc_recycle_xdp_tx_buff()
519 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
520 __free_page(rx_swbd.page); in enetc_recycle_xdp_tx_buff()
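
Lines 491-520 give back a page that was loaned to a Tx ring for XDP_TX: a temporary rx_swbd is rebuilt from the completed Tx software BD, and the page is either recycled into the Rx ring and re-synced towards the device, or unmapped and freed when there is nowhere to put it. A condensed sketch; the Tx BD fields mirror those copied at lines 1196-1200, while the free-slot check and the sync-length macro are assumptions:

struct enetc_tx_swbd {			/* only the fields this path touches; assumed */
	dma_addr_t dma;
	struct page *page;
	u16 page_offset;
	u16 len;
	enum dma_data_direction dir;
};

static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *rx_ring,
				      struct enetc_tx_swbd *tx_swbd)
{
	/* rebuild an Rx software BD from the transmitted buffer */
	struct enetc_rx_swbd rx_swbd = {
		.dma = tx_swbd->dma,
		.page = tx_swbd->page,
		.page_offset = tx_swbd->page_offset,
		.dir = tx_swbd->dir,
		.len = tx_swbd->len,
	};

	if (enetc_rx_ring_has_room(rx_ring)) {	/* assumed helper */
		enetc_reuse_page(rx_ring, &rx_swbd);

		/* hand the half-page back to the device for the next frame */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
						 rx_swbd.page_offset,
						 ENETC_RXB_DMA_SIZE_XDP, /* assumed macro */
						 rx_swbd.dir);
	} else {
		/* no room to recycle: drop the mapping and the page */
		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
			       rx_swbd.dir);
		__free_page(rx_swbd.page);
	}
}
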
624 struct enetc_rx_swbd *rx_swbd) in enetc_new_page() argument
635 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in enetc_new_page()
637 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); in enetc_new_page()
644 rx_swbd->dma = addr; in enetc_new_page()
645 rx_swbd->page = page; in enetc_new_page()
646 rx_swbd->page_offset = rx_ring->buffer_offset; in enetc_new_page()
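
Lines 624-646 populate an empty rx_swbd with a freshly allocated, DMA-mapped page. The mapping direction is bidirectional when an XDP program is attached, because XDP_TX later transmits straight out of the same mapping. A sketch; the page allocation, the error handling and the xdp-program field are filled in as assumptions:

static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	bool xdp = !!rx_ring->xdp_prog;		/* assumed field */
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	/* XDP_TX sends out of this page, so map it both ways in that case */
	rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);
		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = rx_ring->buffer_offset;

	return true;
}
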
653 struct enetc_rx_swbd *rx_swbd; in enetc_refill_rx_ring() local
658 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
663 if (unlikely(!rx_swbd->page)) { in enetc_refill_rx_ring()
664 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { in enetc_refill_rx_ring()
671 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + in enetc_refill_rx_ring()
672 rx_swbd->page_offset); in enetc_refill_rx_ring()
677 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
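
Lines 653-677 are the refill loop: every slot whose rx_swbd has no page gets one from enetc_new_page(), and the page's DMA address plus the headroom offset is written into the matching hardware Rx BD. A simplified sketch; the hardware-descriptor shape and the enetc_rxbd() accessor are assumptions, and the real loop also rings the doorbell once it is done:

union enetc_rx_bd {			/* assumed minimal shape of the hardware BD */
	struct { __le64 addr; } w;
};

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i = rx_ring->next_to_use;
	int refilled = 0;

	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);	/* assumed accessor */

	while (buff_cnt--) {
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd)))
				break;	/* out of pages, retry on the next poll */
		}

		/* point the hardware BD at the headroom-adjusted buffer */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);

		/* advance software and hardware descriptors with wraparound */
		if (unlikely(++i == rx_ring->bd_count))
			i = 0;
		rx_swbd = &rx_ring->rx_swbd[i];
		rxbd = enetc_rxbd(rx_ring, i);
		refilled++;
	}

	if (refilled)
		rx_ring->next_to_use = i;

	return refilled;
}
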
768 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_get_rx_buff() local
770 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, in enetc_get_rx_buff()
771 rx_swbd->page_offset, in enetc_get_rx_buff()
772 size, rx_swbd->dir); in enetc_get_rx_buff()
773 return rx_swbd; in enetc_get_rx_buff()
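
Lines 768-773 fetch the software BD for a completed descriptor and transfer ownership of the received bytes back to the CPU before anything parses them. A sketch, assuming the layout from the first snippet:

static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	/* make the bytes the device just wrote visible to the CPU */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, rx_swbd->dir);
	return rx_swbd;
}
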
778 struct enetc_rx_swbd *rx_swbd) in enetc_put_rx_buff() argument
782 enetc_reuse_page(rx_ring, rx_swbd); in enetc_put_rx_buff()
784 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, in enetc_put_rx_buff()
785 rx_swbd->page_offset, in enetc_put_rx_buff()
786 buffer_size, rx_swbd->dir); in enetc_put_rx_buff()
788 rx_swbd->page = NULL; in enetc_put_rx_buff()
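
Lines 778-788 return a buffer whose page stays with the ring: the slot is copied to next_to_alloc via enetc_reuse_page(), the buffer is synced back towards the device, and the old slot forgets the page so the refill loop skips it. A sketch; the sync length is assumed to be the usable part of the half-page:

static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
			      struct enetc_rx_swbd *rx_swbd)
{
	size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;

	enetc_reuse_page(rx_ring, rx_swbd);

	/* the device may DMA into this half of the page again */
	dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
					 rx_swbd->page_offset,
					 buffer_size, rx_swbd->dir);

	rx_swbd->page = NULL;
}
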
793 struct enetc_rx_swbd *rx_swbd) in enetc_flip_rx_buff() argument
795 if (likely(enetc_page_reusable(rx_swbd->page))) { in enetc_flip_rx_buff()
796 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; in enetc_flip_rx_buff()
797 page_ref_inc(rx_swbd->page); in enetc_flip_rx_buff()
799 enetc_put_rx_buff(rx_ring, rx_swbd); in enetc_flip_rx_buff()
801 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_flip_rx_buff()
802 rx_swbd->dir); in enetc_flip_rx_buff()
803 rx_swbd->page = NULL; in enetc_flip_rx_buff()
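
Lines 793-803 decide the fate of a buffer that was just consumed: if the page is still reusable, flip page_offset to the other half of the page, take an extra reference and recycle it through enetc_put_rx_buff(); otherwise unmap the page and let whoever now holds it (usually an skb) free it. A sketch; enetc_page_reusable() is assumed to be essentially a page-refcount check:

static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
			       struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		/* hand the other half of the page back to hardware and keep
		 * this half alive for the stack via the extra reference
		 */
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
		page_ref_inc(rx_swbd->page);

		enetc_put_rx_buff(rx_ring, rx_swbd);
	} else {
		/* page cannot be reused: drop the mapping, the stack owns it now */
		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);

		rx_swbd->page = NULL;
	}
}
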
810 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_skb() local
814 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_skb()
824 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_map_rx_buff_to_skb()
832 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_skb() local
834 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, in enetc_add_rx_buff_to_skb()
835 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); in enetc_add_rx_buff_to_skb()
837 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_add_rx_buff_to_skb()
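
Lines 810-837 cover skb construction. For the first buffer the skb head is built around page_address(page) + page_offset (lines 810-824); every following buffer is attached as a paged fragment with the half-page as its truesize. A sketch of the fragment path, which is fully visible above:

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	/* truesize is the half-page, regardless of how much was received */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	/* the skb now holds a reference; recycle or release the slot */
	enetc_flip_rx_buff(rx_ring, rx_swbd);
}
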
847 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
854 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1122 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_xdp() local
1123 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_xdp()
1127 rx_swbd->len = size; in enetc_map_rx_buff_to_xdp()
1140 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_xdp() local
1144 rx_swbd->len = size; in enetc_add_rx_buff_to_xdp()
1146 skb_frag_off_set(frag, rx_swbd->page_offset); in enetc_add_rx_buff_to_xdp()
1148 __skb_frag_set_page(frag, rx_swbd->page); in enetc_add_rx_buff_to_xdp()
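
Lines 1122-1148 are the XDP equivalents: the first buffer becomes the linear area of the xdp_buff (with the driver headroom exposed to the program), later buffers become frags in the shared info, and in both cases rx_swbd->len records how much was used so the buffer can later be turned into an XDP_TX descriptor or dropped. A sketch of the head setup (needs net/xdp.h in addition to the includes above); the use of the generic xdp_prepare_buff() helper is an assumption:

static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
				     struct xdp_buff *xdp_buff, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;

	/* remembered for the XDP_TX and drop paths */
	rx_swbd->len = size;

	/* hard_start points at the received data; the true start of the
	 * buffer sits buffer_offset bytes earlier, which is the headroom
	 * an XDP program may grow into
	 */
	xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
			 rx_ring->buffer_offset, size, false);
}
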
1192 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_rx_swbd_to_xdp_tx_swbd() local
1196 tx_swbd->dma = rx_swbd->dma; in enetc_rx_swbd_to_xdp_tx_swbd()
1197 tx_swbd->dir = rx_swbd->dir; in enetc_rx_swbd_to_xdp_tx_swbd()
1198 tx_swbd->page = rx_swbd->page; in enetc_rx_swbd_to_xdp_tx_swbd()
1199 tx_swbd->page_offset = rx_swbd->page_offset; in enetc_rx_swbd_to_xdp_tx_swbd()
1200 tx_swbd->len = rx_swbd->len; in enetc_rx_swbd_to_xdp_tx_swbd()
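
Lines 1192-1200 are the XDP_TX handoff: the Rx software BDs of one frame are converted field by field into Tx software BDs, so the already-mapped pages are transmitted in place with no copy and no new mapping; when the transmission completes, enetc_recycle_xdp_tx_buff() (lines 491-520) hands the pages back to the Rx ring. A sketch; the array parameter, return value and loop bounds are assumptions around the copy the listing shows:

static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
					struct enetc_bdr *rx_ring,
					int rx_ring_first, int rx_ring_last)
{
	int n = 0;

	for (; rx_ring_first != rx_ring_last; n++) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
		struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];

		/* transmit straight out of the Rx page: same DMA mapping */
		tx_swbd->dma = rx_swbd->dma;
		tx_swbd->dir = rx_swbd->dir;
		tx_swbd->page = rx_swbd->page;
		tx_swbd->page_offset = rx_swbd->page_offset;
		tx_swbd->len = rx_swbd->len;

		/* walk the frame's buffers with wraparound */
		if (unlikely(++rx_ring_first == rx_ring->bd_count))
			rx_ring_first = 0;
	}

	return n;	/* number of Tx BDs produced */
}
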
1217 &rx_ring->rx_swbd[rx_ring_first]); in enetc_xdp_drop()
1227 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_xdp_free() local
1229 if (rx_swbd->page) { in enetc_xdp_free()
1230 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_xdp_free()
1231 rx_swbd->dir); in enetc_xdp_free()
1232 __free_page(rx_swbd->page); in enetc_xdp_free()
1233 rx_swbd->page = NULL; in enetc_xdp_free()
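
Lines 1227-1233 handle buffers that can be neither recycled nor passed up, for instance when the XDP verdict cannot be honoured: any page still attached to a slot is unmapped and freed outright. A sketch; the loop over the frame's buffers is assumed:

static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
			   int rx_ring_last)
{
	while (rx_ring_first != rx_ring_last) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];

		if (rx_swbd->page) {
			dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
				       rx_swbd->dir);
			__free_page(rx_swbd->page);
			rx_swbd->page = NULL;
		}

		if (unlikely(++rx_ring_first == rx_ring->bd_count))
			rx_ring_first = 0;
	}
}
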
1329 rx_ring->rx_swbd[orig_i].page = NULL; in enetc_clean_rx_ring_xdp()
1353 &rx_ring->rx_swbd[orig_i]); in enetc_clean_rx_ring_xdp()
1556 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); in enetc_alloc_rxbdr()
1557 if (!rxr->rx_swbd) in enetc_alloc_rxbdr()
1565 vfree(rxr->rx_swbd); in enetc_alloc_rxbdr()
1586 vfree(rxr->rx_swbd); in enetc_free_rxbdr()
1587 rxr->rx_swbd = NULL; in enetc_free_rxbdr()
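
Lines 1556-1587 show the lifetime of the rx_swbd array itself: one entry per hardware BD, allocated zeroed at ring setup so that page == NULL marks every slot as empty for the refill loop, and vfreed both on the allocator's error path and at teardown. A hypothetical pair of helpers wrapping what enetc_alloc_rxbdr()/enetc_free_rxbdr() do inline (needs linux/vmalloc.h):

static int enetc_alloc_rx_swbd_array(struct enetc_bdr *rxr)
{
	/* zeroed on purpose: !rx_swbd->page is the "needs a page" marker */
	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
	if (!rxr->rx_swbd)
		return -ENOMEM;

	return 0;
}

static void enetc_free_rx_swbd_array(struct enetc_bdr *rxr)
{
	vfree(rxr->rx_swbd);	/* vfree(NULL) is a no-op */
	rxr->rx_swbd = NULL;
}
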
1640 if (!rx_ring->rx_swbd) in enetc_free_rx_ring()
1644 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_free_rx_ring() local
1646 if (!rx_swbd->page) in enetc_free_rx_ring()
1649 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_free_rx_ring()
1650 rx_swbd->dir); in enetc_free_rx_ring()
1651 __free_page(rx_swbd->page); in enetc_free_rx_ring()
1652 rx_swbd->page = NULL; in enetc_free_rx_ring()
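
Lines 1640-1652 drain the ring at shutdown: every slot that still owns a page is unmapped and freed, undoing enetc_new_page() for whatever the datapath did not already release. Sketch:

static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
	int i;

	if (!rx_ring->rx_swbd)
		return;

	for (i = 0; i < rx_ring->bd_count; i++) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		if (!rx_swbd->page)
			continue;

		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		__free_page(rx_swbd->page);
		rx_swbd->page = NULL;
	}
}
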