Lines matching refs: rx_swbd

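These references are all in drivers/net/ethernet/freescale/enetc/enetc.c. For orientation, the fields the listing touches (dma, page, page_offset, dir, len) imply a software buffer descriptor roughly like the sketch below; this is inferred from the usage, not copied from the definition in enetc.h, which may carry more fields:

struct enetc_rx_swbd {
	dma_addr_t dma;			/* DMA address of the mapped page */
	struct page *page;		/* backing page; NULL marks an empty slot */
	enum dma_data_direction dir;	/* DMA_FROM_DEVICE, or DMA_BIDIRECTIONAL for XDP */
	u16 page_offset;		/* offset of the half-page buffer in use */
	u16 len;			/* fragment length, set on the XDP path */
};
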
721 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; in enetc_reuse_page()
759 struct enetc_rx_swbd rx_swbd = { in enetc_recycle_xdp_tx_buff() local
771 enetc_reuse_page(rx_ring, &rx_swbd); in enetc_recycle_xdp_tx_buff()
774 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, in enetc_recycle_xdp_tx_buff()
775 rx_swbd.page_offset, in enetc_recycle_xdp_tx_buff()
777 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
786 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, in enetc_recycle_xdp_tx_buff()
787 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
788 __free_page(rx_swbd.page); in enetc_recycle_xdp_tx_buff()
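
Lines 759-788 are the XDP_TX recycle path: once transmission of an XDP_TX frame completes, its page is handed straight back to the RX ring instead of being freed. A reconstructed sketch follows; the rx_ring lookup, the free-slot check, and the sync length are assumptions, the rest mirrors the listed lines:

static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
				      struct enetc_tx_swbd *tx_swbd)
{
	struct enetc_rx_swbd rx_swbd = {		/* line 759 */
		.dma = tx_swbd->dma,
		.page = tx_swbd->page,
		.page_offset = tx_swbd->page_offset,
		.dir = tx_swbd->dir,
	};
	struct enetc_bdr *rx_ring;

	rx_ring = enetc_rx_ring_from_xdp_tx_ring(tx_ring);	/* assumed lookup */

	if (likely(enetc_swbd_unused(rx_ring))) {		/* assumed check */
		/* park the page in a free RX slot (line 771) ... */
		enetc_reuse_page(rx_ring, &rx_swbd);

		/* ... and hand the buffer back to the device (774-777) */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
						 rx_swbd.page_offset,
						 ENETC_RXB_DMA_SIZE_XDP,
						 rx_swbd.dir);
	} else {
		/* no room to recycle: drop mapping and page (786-788) */
		dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
			       rx_swbd.dir);
		__free_page(rx_swbd.page);
	}
}
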
893 struct enetc_rx_swbd *rx_swbd) in enetc_new_page() argument
904 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in enetc_new_page()
906 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); in enetc_new_page()
913 rx_swbd->dma = addr; in enetc_new_page()
914 rx_swbd->page = page; in enetc_new_page()
915 rx_swbd->page_offset = rx_ring->buffer_offset; in enetc_new_page()
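
enetc_new_page() (lines 893-915) populates one empty slot: allocate a page, pick the DMA direction, map it, and record the result in the swbd. The allocation and error handling between the listed lines are assumptions; the field assignments match lines 904-915:

static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	bool xdp = !!(rx_ring->xdp.prog);	/* assumed: how "xdp" is derived */
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();		/* assumed allocator */
	if (unlikely(!page))
		return false;

	/* XDP_TX sends from the same mapping, so map it both ways */
	rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);
		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = rx_ring->buffer_offset;

	return true;
}
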
922 struct enetc_rx_swbd *rx_swbd; in enetc_refill_rx_ring() local
927 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
932 if (unlikely(!rx_swbd->page)) { in enetc_refill_rx_ring()
933 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { in enetc_refill_rx_ring()
940 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + in enetc_refill_rx_ring()
941 rx_swbd->page_offset); in enetc_refill_rx_ring()
946 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
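
enetc_refill_rx_ring() (lines 922-946) walks the ring from next_to_use, calls enetc_new_page() for any slot whose page is NULL, and publishes the buffer address in the hardware BD. Everything outside the listed lines (loop bookkeeping, BD accessors, the final doorbell write) is an assumption:

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;		/* assumed starting point */
	rx_swbd = &rx_ring->rx_swbd[i];		/* line 927 */
	rxbd = enetc_rxbd(rx_ring, i);		/* assumed accessor */

	for (j = 0; j < buff_cnt; j++) {
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd)))
				break;		/* out of memory, stop early */
		}

		/* point the hardware BD at the buffer (lines 940-941) */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);

		enetc_rxbd_next(rx_ring, &rxbd, &i);	/* assumed: advance + wrap */
		rx_swbd = &rx_ring->rx_swbd[i];		/* line 946 */
	}

	if (likely(j)) {
		rx_ring->next_to_use = i;
		/* the real driver also writes the consumer index register here */
	}

	return j;
}
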
1037 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_get_rx_buff() local
1039 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, in enetc_get_rx_buff()
1040 rx_swbd->page_offset, in enetc_get_rx_buff()
1041 size, rx_swbd->dir); in enetc_get_rx_buff()
1042 return rx_swbd; in enetc_get_rx_buff()
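
enetc_get_rx_buff() (lines 1037-1042) is the CPU half of the DMA ownership handoff: before the driver reads packet data, only the active region of the half-page buffer is synced for CPU access. This one is nearly complete in the listing:

static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	/* give the CPU a coherent view of just the received bytes */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, rx_swbd->dir);

	return rx_swbd;
}
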
1047 struct enetc_rx_swbd *rx_swbd) in enetc_put_rx_buff() argument
1051 enetc_reuse_page(rx_ring, rx_swbd); in enetc_put_rx_buff()
1053 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, in enetc_put_rx_buff()
1054 rx_swbd->page_offset, in enetc_put_rx_buff()
1055 buffer_size, rx_swbd->dir); in enetc_put_rx_buff()
1057 rx_swbd->page = NULL; in enetc_put_rx_buff()
1062 struct enetc_rx_swbd *rx_swbd) in enetc_flip_rx_buff() argument
1064 if (likely(enetc_page_reusable(rx_swbd->page))) { in enetc_flip_rx_buff()
1065 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; in enetc_flip_rx_buff()
1066 page_ref_inc(rx_swbd->page); in enetc_flip_rx_buff()
1068 enetc_put_rx_buff(rx_ring, rx_swbd); in enetc_flip_rx_buff()
1070 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_flip_rx_buff()
1071 rx_swbd->dir); in enetc_flip_rx_buff()
1072 rx_swbd->page = NULL; in enetc_flip_rx_buff()
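
enetc_put_rx_buff() and enetc_flip_rx_buff() (lines 1047-1072) implement the half-page recycling scheme: each page is split into two ENETC_RXB_TRUESIZE halves, and XOR-ing page_offset with ENETC_RXB_TRUESIZE switches to the other half. If enetc_page_reusable() sees no other user of the page, the driver takes an extra reference, flips to the spare half, and puts the buffer back on the ring, synced for the device again; otherwise it only drops its DMA mapping and lets the network stack free the page when the last reference goes away. Reconstructed almost verbatim from the lines above:

static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
			       struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		/* flip to the other half of the page ... */
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
		/* ... and keep the page alive for that second use */
		page_ref_inc(rx_swbd->page);

		/* back onto the ring, synced for the device again */
		enetc_put_rx_buff(rx_ring, rx_swbd);
	} else {
		/* the stack still owns part of the page: just unmap */
		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		rx_swbd->page = NULL;
	}
}
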
1079 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_skb() local
1083 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_skb()
1093 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_map_rx_buff_to_skb()
1101 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_skb() local
1103 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, in enetc_add_rx_buff_to_skb()
1104 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); in enetc_add_rx_buff_to_skb()
1106 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_add_rx_buff_to_skb()
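
Lines 1079-1106 are the skb construction path: the first buffer becomes the skb head (the real driver uses build_skb() there; those lines do not mention rx_swbd, so they are absent from this listing), and every further buffer is attached as a paged fragment. The error path listed just below (lines 1116 and 1123) gives buffers back with enetc_put_rx_buff() instead. Sketch of the fragment case, nearly verbatim from the listing:

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	/* attach this half page as a fragment; truesize is the half size */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	/* the skb holds a page reference now; recycle or release the slot */
	enetc_flip_rx_buff(rx_ring, rx_swbd);
}
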
1116 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1123 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1391 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_xdp() local
1392 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_xdp()
1396 rx_swbd->len = size; in enetc_map_rx_buff_to_xdp()
1409 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_xdp() local
1413 rx_swbd->len = size; in enetc_add_rx_buff_to_xdp()
1415 skb_frag_off_set(frag, rx_swbd->page_offset); in enetc_add_rx_buff_to_xdp()
1417 __skb_frag_set_page(frag, rx_swbd->page); in enetc_add_rx_buff_to_xdp()
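
Lines 1391-1417 build the xdp_buff for the XDP program: the first buffer supplies the linear area (hard_start at line 1392), and later buffers land in the shared info as fragments. rx_swbd->len is stashed so a later XDP_TX verdict can size the TX descriptor. Sketch of the fragment path; the shared-info bookkeeping around the listed lines is an assumption:

static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct xdp_buff *xdp_buff)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];

	/* remembered for a possible XDP_TX conversion later */
	rx_swbd->len = size;

	skb_frag_off_set(frag, rx_swbd->page_offset);
	skb_frag_size_set(frag, size);
	__skb_frag_set_page(frag, rx_swbd->page);

	shinfo->nr_frags++;
}
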
1461 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_rx_swbd_to_xdp_tx_swbd() local
1465 tx_swbd->dma = rx_swbd->dma; in enetc_rx_swbd_to_xdp_tx_swbd()
1466 tx_swbd->dir = rx_swbd->dir; in enetc_rx_swbd_to_xdp_tx_swbd()
1467 tx_swbd->page = rx_swbd->page; in enetc_rx_swbd_to_xdp_tx_swbd()
1468 tx_swbd->page_offset = rx_swbd->page_offset; in enetc_rx_swbd_to_xdp_tx_swbd()
1469 tx_swbd->len = rx_swbd->len; in enetc_rx_swbd_to_xdp_tx_swbd()
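
For an XDP_TX verdict (lines 1461-1469) the RX buffer migrates wholesale to a TX software BD: no new allocation, no new DMA mapping, which is exactly why enetc_new_page() mapped the page DMA_BIDIRECTIONAL when XDP is enabled. Simplified single-descriptor sketch; the real function loops over a descriptor range:

static void enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *tx_swbd,
					 struct enetc_bdr *rx_ring,
					 int rx_ring_first)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];

	/* no dma_map here: the RX mapping is already bidirectional */
	tx_swbd->dma = rx_swbd->dma;
	tx_swbd->dir = rx_swbd->dir;
	tx_swbd->page = rx_swbd->page;
	tx_swbd->page_offset = rx_swbd->page_offset;
	tx_swbd->len = rx_swbd->len;
}
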
1486 &rx_ring->rx_swbd[rx_ring_first]); in enetc_xdp_drop()
1496 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_xdp_free() local
1498 if (rx_swbd->page) { in enetc_xdp_free()
1499 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_xdp_free()
1500 rx_swbd->dir); in enetc_xdp_free()
1501 __free_page(rx_swbd->page); in enetc_xdp_free()
1502 rx_swbd->page = NULL; in enetc_xdp_free()
1598 rx_ring->rx_swbd[orig_i].page = NULL; in enetc_clean_rx_ring_xdp()
1622 &rx_ring->rx_swbd[orig_i]); in enetc_clean_rx_ring_xdp()
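
Lines 1486-1622 are the failure and hand-off paths. enetc_xdp_drop() (1486) returns buffers to the ring via enetc_put_rx_buff(); enetc_xdp_free() (1496-1502) gives up on them entirely; and line 1598 is the XDP_REDIRECT case, where the page now belongs to another subsystem and the slot is simply NULL-ed so the next refill allocates a fresh one. Sketch of the free path, with the loop over the descriptor range assumed:

static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
			   int rx_ring_last)
{
	while (rx_ring_first != rx_ring_last) {
		struct enetc_rx_swbd *rx_swbd =
			&rx_ring->rx_swbd[rx_ring_first];

		if (rx_swbd->page) {
			dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
				       rx_swbd->dir);
			__free_page(rx_swbd->page);
			rx_swbd->page = NULL;	/* mark slot empty for refill */
		}

		enetc_bdr_idx_inc(rx_ring, &rx_ring_first);	/* assumed helper */
	}
}
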
1846 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); in enetc_alloc_rxbdr()
1847 if (!rxr->rx_swbd) in enetc_alloc_rxbdr()
1855 vfree(rxr->rx_swbd); in enetc_alloc_rxbdr()
1876 vfree(rxr->rx_swbd); in enetc_free_rxbdr()
1877 rxr->rx_swbd = NULL; in enetc_free_rxbdr()
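
Ring setup and teardown (lines 1846-1877): the rx_swbd array is one vzalloc()'d entry per BD, freed on the hardware-BD allocation error path (1855) and again at ring destruction (1876-1877). A sketch of just these parts; the hardware BD ring handling in between is elided:

static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
{
	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
	if (!rxr->rx_swbd)
		return -ENOMEM;

	/* hardware BD ring allocation goes here; if it fails:
	 *	vfree(rxr->rx_swbd);		(line 1855)
	 *	return -ENOMEM;
	 */

	return 0;
}

static void enetc_free_rxbdr(struct enetc_bdr *rxr)
{
	/* hardware BD ring freed first (elided) */
	vfree(rxr->rx_swbd);
	rxr->rx_swbd = NULL;	/* guards against a double vfree() */
}
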
1930 if (!rx_ring->rx_swbd) in enetc_free_rx_ring()
1934 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_free_rx_ring() local
1936 if (!rx_swbd->page) in enetc_free_rx_ring()
1939 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_free_rx_ring()
1940 rx_swbd->dir); in enetc_free_rx_ring()
1941 __free_page(rx_swbd->page); in enetc_free_rx_ring()
1942 rx_swbd->page = NULL; in enetc_free_rx_ring()
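
enetc_free_rx_ring() (lines 1930-1942) drops every page still parked in the ring: unmap, free, and NULL the slot. Only the loop header between the listed lines is assumed:

static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
	int i;

	if (!rx_ring->rx_swbd)
		return;

	for (i = 0; i < rx_ring->bd_count; i++) {	/* assumed loop bounds */
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		if (!rx_swbd->page)
			continue;	/* never filled, or already handed off */

		dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
			       rx_swbd->dir);
		__free_page(rx_swbd->page);
		rx_swbd->page = NULL;
	}
}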