Lines matching references to rx_ring in the enetc driver (source line number, matching code, enclosing function)

18 if (priv->rx_ring[i]->xdp.prog) in enetc_num_stack_tx_queues()
29 return priv->rx_ring[index]; in enetc_rx_ring_from_xdp_tx_ring()
430 v->rx_ring.stats.packets, in enetc_rx_net_dim()
431 v->rx_ring.stats.bytes, in enetc_rx_net_dim()
448 static void enetc_reuse_page(struct enetc_bdr *rx_ring, in enetc_reuse_page() argument
453 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; in enetc_reuse_page()
456 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); in enetc_reuse_page()
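The reuse helper at lines 448-456 is the core of the driver's page-recycling scheme: a still-live page is parked at next_to_alloc so a later refill can pick it up without a fresh allocation. Below is a rough user-space model of that circular bookkeeping; the struct and helper names only mirror the driver's fields and are not the kernel definitions.

/* Toy model of the ring-index bookkeeping; not kernel code. */
struct swbd {
	void *page;          /* backing page (NULL when the slot is empty)  */
	int page_offset;     /* where the usable buffer starts in the page */
};

struct bdr_model {
	int bd_count;        /* number of buffer descriptors in the ring */
	int next_to_use;     /* producer index handed to hardware        */
	int next_to_clean;   /* consumer index for completed descriptors */
	int next_to_alloc;   /* slot the next page reuse will land in    */
	struct swbd *swbd;   /* software shadow of the hardware BDs      */
};

static void idx_inc(const struct bdr_model *r, int *i)
{
	*i = (*i + 1 == r->bd_count) ? 0 : *i + 1;   /* wrap around the ring */
}

/* Free slots between consumer and producer, in the spirit of enetc_bd_unused(). */
static int bd_unused(const struct bdr_model *r)
{
	int diff = r->next_to_clean - r->next_to_use - 1;

	return diff < 0 ? diff + r->bd_count : diff;
}

/* Mirrors the idea of enetc_reuse_page(): park the old page in the slot
 * that the next refill pass will consume. */
static void reuse_page(struct bdr_model *r, const struct swbd *old)
{
	r->swbd[r->next_to_alloc] = *old;
	idx_inc(r, &r->next_to_alloc);
}
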
498 struct enetc_bdr *rx_ring; in enetc_recycle_xdp_tx_buff() local
500 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); in enetc_recycle_xdp_tx_buff()
502 if (likely(enetc_swbd_unused(rx_ring))) { in enetc_recycle_xdp_tx_buff()
503 enetc_reuse_page(rx_ring, &rx_swbd); in enetc_recycle_xdp_tx_buff()
506 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, in enetc_recycle_xdp_tx_buff()
511 rx_ring->stats.recycles++; in enetc_recycle_xdp_tx_buff()
516 rx_ring->stats.recycle_failures++; in enetc_recycle_xdp_tx_buff()
518 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, in enetc_recycle_xdp_tx_buff()
523 rx_ring->xdp.xdp_tx_in_flight--; in enetc_recycle_xdp_tx_buff()
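enetc_recycle_xdp_tx_buff() (lines 498-523) decides what happens to a page once its XDP_TX transmission completes: if the paired RX ring has a free software-BD slot the page goes straight back into rotation, otherwise it is unmapped and dropped, and either way the in-flight count shrinks. A condensed sketch of that decision, continuing the toy types above; slot_free and the counter names stand in for the driver's enetc_swbd_unused() check and rx_ring->stats fields.

#include <stdbool.h>

struct rx_stats_model {
	unsigned long recycles;
	unsigned long recycle_failures;
};

/* Sketch of the recycle decision made after an XDP_TX completion. */
static void recycle_tx_buff(struct bdr_model *rx, struct rx_stats_model *st,
			    const struct swbd *buf, bool slot_free,
			    int *xdp_tx_in_flight)
{
	if (slot_free) {
		reuse_page(rx, buf);      /* page goes back into RX rotation */
		st->recycles++;
	} else {
		/* the driver unmaps the page (dma_unmap_page) and drops it */
		st->recycle_failures++;
	}

	(*xdp_tx_in_flight)--;            /* one fewer page parked on XDP TX */
}
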
623 static bool enetc_new_page(struct enetc_bdr *rx_ring, in enetc_new_page() argument
626 bool xdp = !!(rx_ring->xdp.prog); in enetc_new_page()
637 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); in enetc_new_page()
638 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { in enetc_new_page()
646 rx_swbd->page_offset = rx_ring->buffer_offset; in enetc_new_page()
651 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) in enetc_refill_rx_ring() argument
657 i = rx_ring->next_to_use; in enetc_refill_rx_ring()
658 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
659 rxbd = enetc_rxbd(rx_ring, i); in enetc_refill_rx_ring()
664 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { in enetc_refill_rx_ring()
665 rx_ring->stats.rx_alloc_errs++; in enetc_refill_rx_ring()
676 enetc_rxbd_next(rx_ring, &rxbd, &i); in enetc_refill_rx_ring()
677 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
681 rx_ring->next_to_alloc = i; /* keep track from page reuse */ in enetc_refill_rx_ring()
682 rx_ring->next_to_use = i; in enetc_refill_rx_ring()
685 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); in enetc_refill_rx_ring()
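enetc_refill_rx_ring() (lines 651-685) walks forward from next_to_use, gives every empty software BD a page (freshly allocated, or one parked earlier by the reuse path), then publishes the new producer index to hardware. A stripped-down sketch of that loop against the toy model; new_page stands in for enetc_new_page(), and the register write is only noted in a comment.

/* Refill sketch: (re)populate up to 'count' software BDs and advance the
 * producer index.  Returns how many BDs were actually filled. */
static int refill(struct bdr_model *r, int count, bool (*new_page)(struct swbd *))
{
	int i = r->next_to_use;
	int done = 0;

	while (count--) {
		struct swbd *sw = &r->swbd[i];

		if (!sw->page && !new_page(sw))
			break;                  /* allocation failed: stop early */

		/* the driver writes sw->dma + page_offset into the hardware
		 * RX BD and clears its status word at this point */
		idx_inc(r, &i);
		done++;
	}

	if (done) {
		r->next_to_alloc = i;           /* reuse slots trail the refill point */
		r->next_to_use = i;
		/* the driver then writes next_to_use to the ring's consumer/producer
		 * register (rx_ring->rcir) so hardware sees the new buffers */
	}

	return done;
}
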
717 static void enetc_get_offloads(struct enetc_bdr *rx_ring, in enetc_get_offloads() argument
720 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); in enetc_get_offloads()
723 if (rx_ring->ndev->features & NETIF_F_RXCSUM) { in enetc_get_offloads()
757 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); in enetc_get_offloads()
765 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, in enetc_get_rx_buff() argument
768 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_get_rx_buff()
770 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, in enetc_get_rx_buff()
777 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, in enetc_put_rx_buff() argument
780 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; in enetc_put_rx_buff()
782 enetc_reuse_page(rx_ring, rx_swbd); in enetc_put_rx_buff()
784 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, in enetc_put_rx_buff()
792 static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring, in enetc_flip_rx_buff() argument
799 enetc_put_rx_buff(rx_ring, rx_swbd); in enetc_flip_rx_buff()
801 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_flip_rx_buff()
807 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, in enetc_map_rx_buff_to_skb() argument
810 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_skb()
815 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); in enetc_map_rx_buff_to_skb()
817 rx_ring->stats.rx_alloc_errs++; in enetc_map_rx_buff_to_skb()
821 skb_reserve(skb, rx_ring->buffer_offset); in enetc_map_rx_buff_to_skb()
824 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_map_rx_buff_to_skb()
829 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, in enetc_add_rx_buff_to_skb() argument
832 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_skb()
837 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_add_rx_buff_to_skb()
840 static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, in enetc_check_bd_errors_and_consume() argument
847 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
848 enetc_rxbd_next(rx_ring, rxbd, i); in enetc_check_bd_errors_and_consume()
854 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
855 enetc_rxbd_next(rx_ring, rxbd, i); in enetc_check_bd_errors_and_consume()
858 rx_ring->ndev->stats.rx_dropped++; in enetc_check_bd_errors_and_consume()
859 rx_ring->ndev->stats.rx_errors++; in enetc_check_bd_errors_and_consume()
864 static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring, in enetc_build_skb() argument
872 skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size); in enetc_build_skb()
876 enetc_get_offloads(rx_ring, *rxbd, skb); in enetc_build_skb()
880 enetc_rxbd_next(rx_ring, rxbd, i); in enetc_build_skb()
892 enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb); in enetc_build_skb()
896 enetc_rxbd_next(rx_ring, rxbd, i); in enetc_build_skb()
899 skb_record_rx_queue(skb, rx_ring->index); in enetc_build_skb()
900 skb->protocol = eth_type_trans(skb, rx_ring->ndev); in enetc_build_skb()
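enetc_build_skb() (lines 864-900) turns one completed descriptor, or a chain of them, into a single skb: the first BD's buffer becomes the skb head via build_skb(), and further BDs of the same frame are attached as page fragments until the final-BD flag is seen. The control flow reduced to the toy model; frame_part and its final flag stand in for the hardware BD status bits.

struct frame_part {
	int size;      /* bytes carried by this BD     */
	bool final;    /* last BD of the frame (F bit) */
};

/* Frame-assembly sketch: consume BDs starting at *i until the final flag. */
static int build_frame(struct bdr_model *r, int *i, const struct frame_part *bds)
{
	const struct frame_part *bd = &bds[*i];
	int len = bd->size;

	/* head: the driver calls build_skb() on the first buffer and
	 * skb_reserve()s rx_ring->buffer_offset */
	idx_inc(r, i);

	while (!bd->final) {           /* scatter frame spanning several BDs */
		bd = &bds[*i];
		len += bd->size;       /* attached as a page fragment */
		idx_inc(r, i);
	}

	return len;                    /* caller records the RX queue and
					* calls eth_type_trans() */
}

Judging from the flip/put pair at lines 777-801, each consumed buffer then either stays mapped and flips to the unused half of its page for reuse, or gets unmapped when the page cannot be reused.
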
907 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, in enetc_clean_rx_ring() argument
913 cleaned_cnt = enetc_bd_unused(rx_ring); in enetc_clean_rx_ring()
915 i = rx_ring->next_to_clean; in enetc_clean_rx_ring()
923 cleaned_cnt -= enetc_refill_rx_ring(rx_ring, in enetc_clean_rx_ring()
926 rxbd = enetc_rxbd(rx_ring, i); in enetc_clean_rx_ring()
931 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); in enetc_clean_rx_ring()
934 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, in enetc_clean_rx_ring()
938 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, in enetc_clean_rx_ring()
949 rx_ring->next_to_clean = i; in enetc_clean_rx_ring()
951 rx_ring->stats.packets += rx_frm_cnt; in enetc_clean_rx_ring()
952 rx_ring->stats.bytes += rx_byte_cnt; in enetc_clean_rx_ring()
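enetc_clean_rx_ring() (lines 907-952) is the non-XDP NAPI poll body: it consumes completed descriptors up to the budget and refills the ring in bundles as slots free up. A compressed sketch of that accounting, continuing the toy model; RXBD_BUNDLE_MODEL stands in for the driver's ENETC_RXBD_BUNDLE constant, and a zero-sized frame_part stands in for a descriptor whose ready bit is not yet set.

#define RXBD_BUNDLE_MODEL 16   /* stand-in for ENETC_RXBD_BUNDLE */

/* NAPI clean-loop sketch: returns the number of frames handed up, which
 * the driver reports as work_done. */
static int clean_ring(struct bdr_model *r, const struct frame_part *bds,
		      int budget, bool (*new_page)(struct swbd *))
{
	int cleaned = bd_unused(r);    /* slots already free before we start */
	int frames = 0;
	int i = r->next_to_clean;

	while (frames < budget) {
		int before = i;

		if (cleaned >= RXBD_BUNDLE_MODEL)        /* refill in batches */
			cleaned -= refill(r, cleaned, new_page);

		if (!bds[i].size)                        /* ready bit not set */
			break;

		build_frame(r, &i, bds);                 /* one BD or several */
		cleaned += (i - before + r->bd_count) % r->bd_count;
		frames++;   /* the real skb goes to napi_gro_receive() here */
	}

	r->next_to_clean = i;
	return frames;
}
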
1119 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, in enetc_map_rx_buff_to_xdp() argument
1122 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_xdp()
1129 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, in enetc_map_rx_buff_to_xdp()
1130 rx_ring->buffer_offset, size, false); in enetc_map_rx_buff_to_xdp()
1136 static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, in enetc_add_rx_buff_to_xdp() argument
1140 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_xdp()
1153 static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, in enetc_build_xdp_buff() argument
1159 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); in enetc_build_xdp_buff()
1161 enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size); in enetc_build_xdp_buff()
1163 enetc_rxbd_next(rx_ring, rxbd, i); in enetc_build_xdp_buff()
1175 enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff); in enetc_build_xdp_buff()
1177 enetc_rxbd_next(rx_ring, rxbd, i); in enetc_build_xdp_buff()
1185 struct enetc_bdr *rx_ring, in enetc_rx_swbd_to_xdp_tx_swbd() argument
1191 n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) { in enetc_rx_swbd_to_xdp_tx_swbd()
1192 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_rx_swbd_to_xdp_tx_swbd()
1212 static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, in enetc_xdp_drop() argument
1216 enetc_put_rx_buff(rx_ring, in enetc_xdp_drop()
1217 &rx_ring->rx_swbd[rx_ring_first]); in enetc_xdp_drop()
1218 enetc_bdr_idx_inc(rx_ring, &rx_ring_first); in enetc_xdp_drop()
1220 rx_ring->stats.xdp_drops++; in enetc_xdp_drop()
1223 static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first, in enetc_xdp_free() argument
1227 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_xdp_free()
1230 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_xdp_free()
1235 enetc_bdr_idx_inc(rx_ring, &rx_ring_first); in enetc_xdp_free()
1237 rx_ring->stats.xdp_redirect_failures++; in enetc_xdp_free()
1240 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, in enetc_clean_rx_ring_xdp() argument
1246 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); in enetc_clean_rx_ring_xdp()
1252 cleaned_cnt = enetc_bd_unused(rx_ring); in enetc_clean_rx_ring_xdp()
1254 i = rx_ring->next_to_clean; in enetc_clean_rx_ring_xdp()
1264 rxbd = enetc_rxbd(rx_ring, i); in enetc_clean_rx_ring_xdp()
1269 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); in enetc_clean_rx_ring_xdp()
1272 if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, in enetc_clean_rx_ring_xdp()
1280 enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, in enetc_clean_rx_ring_xdp()
1290 trace_xdp_exception(rx_ring->ndev, prog, xdp_act); in enetc_clean_rx_ring_xdp()
1293 enetc_xdp_drop(rx_ring, orig_i, i); in enetc_clean_rx_ring_xdp()
1300 skb = enetc_build_skb(rx_ring, bd_status, &rxbd, in enetc_clean_rx_ring_xdp()
1309 tx_ring = priv->xdp_tx_ring[rx_ring->index]; in enetc_clean_rx_ring_xdp()
1311 rx_ring, in enetc_clean_rx_ring_xdp()
1315 enetc_xdp_drop(rx_ring, orig_i, i); in enetc_clean_rx_ring_xdp()
1319 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1329 rx_ring->rx_swbd[orig_i].page = NULL; in enetc_clean_rx_ring_xdp()
1330 enetc_bdr_idx_inc(rx_ring, &orig_i); in enetc_clean_rx_ring_xdp()
1344 enetc_xdp_drop(rx_ring, orig_i, i); in enetc_clean_rx_ring_xdp()
1345 rx_ring->stats.xdp_redirect_sg++; in enetc_clean_rx_ring_xdp()
1352 enetc_flip_rx_buff(rx_ring, in enetc_clean_rx_ring_xdp()
1353 &rx_ring->rx_swbd[orig_i]); in enetc_clean_rx_ring_xdp()
1354 enetc_bdr_idx_inc(rx_ring, &orig_i); in enetc_clean_rx_ring_xdp()
1357 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); in enetc_clean_rx_ring_xdp()
1359 enetc_xdp_free(rx_ring, tmp_orig_i, i); in enetc_clean_rx_ring_xdp()
1362 rx_ring->stats.xdp_redirect++; in enetc_clean_rx_ring_xdp()
1370 rx_ring->next_to_clean = i; in enetc_clean_rx_ring_xdp()
1372 rx_ring->stats.packets += rx_frm_cnt; in enetc_clean_rx_ring_xdp()
1373 rx_ring->stats.bytes += rx_byte_cnt; in enetc_clean_rx_ring_xdp()
1381 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) in enetc_clean_rx_ring_xdp()
1382 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - in enetc_clean_rx_ring_xdp()
1383 rx_ring->xdp.xdp_tx_in_flight); in enetc_clean_rx_ring_xdp()
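The tail of enetc_clean_rx_ring_xdp() (lines 1370-1383) refills the ring only partially: pages queued on the XDP TX ring (xdp_tx_in_flight of them) are expected to come back through the recycle path at lines 498-523, so only the remainder gets fresh pages. That arithmetic, as a sketch against the toy model:

/* Deferred-refill sketch for the XDP path: don't allocate pages for slots
 * that in-flight XDP_TX buffers are expected to refill via recycling. */
static void xdp_deferred_refill(struct bdr_model *r, int cleaned,
				int xdp_tx_in_flight,
				bool (*new_page)(struct swbd *))
{
	if (cleaned > xdp_tx_in_flight)
		refill(r, bd_unused(r) - xdp_tx_in_flight, new_page);
}

The apparent intent is to keep the XDP fast path allocation-free in steady state: the same pool of pages cycles between the RX ring and the XDP TX ring as long as transmissions keep completing.
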
1392 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_poll() local
1404 prog = rx_ring->xdp.prog; in enetc_poll()
1406 work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog); in enetc_poll()
1408 work_done = enetc_clean_rx_ring(rx_ring, napi, budget); in enetc_poll()
1596 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended); in enetc_alloc_rx_resources()
1606 enetc_free_rxbdr(priv->rx_ring[i]); in enetc_alloc_rx_resources()
1616 enetc_free_rxbdr(priv->rx_ring[i]); in enetc_free_rx_resources()
1636 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) in enetc_free_rx_ring() argument
1640 if (!rx_ring->rx_swbd) in enetc_free_rx_ring()
1643 for (i = 0; i < rx_ring->bd_count; i++) { in enetc_free_rx_ring()
1644 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_free_rx_ring()
1649 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_free_rx_ring()
1655 rx_ring->next_to_clean = 0; in enetc_free_rx_ring()
1656 rx_ring->next_to_use = 0; in enetc_free_rx_ring()
1657 rx_ring->next_to_alloc = 0; in enetc_free_rx_ring()
1665 enetc_free_rx_ring(priv->rx_ring[i]); in enetc_free_rxtx_rings()
1783 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) in enetc_setup_rxbdr() argument
1785 int idx = rx_ring->index; in enetc_setup_rxbdr()
1789 lower_32_bits(rx_ring->bd_dma_base)); in enetc_setup_rxbdr()
1792 upper_32_bits(rx_ring->bd_dma_base)); in enetc_setup_rxbdr()
1794 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_rxbdr()
1796 ENETC_RTBLENR_LEN(rx_ring->bd_count)); in enetc_setup_rxbdr()
1798 if (rx_ring->xdp.prog) in enetc_setup_rxbdr()
1810 if (rx_ring->ext_en) in enetc_setup_rxbdr()
1813 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in enetc_setup_rxbdr()
1816 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); in enetc_setup_rxbdr()
1817 rx_ring->idr = hw->reg + ENETC_SIRXIDR; in enetc_setup_rxbdr()
1820 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); in enetc_setup_rxbdr()
1835 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); in enetc_setup_bdrs()
1838 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) in enetc_clear_rxbdr() argument
1840 int idx = rx_ring->index; in enetc_clear_rxbdr()
1874 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); in enetc_clear_bdrs()
2238 struct enetc_bdr *rx_ring = priv->rx_ring[i]; in enetc_setup_xdp_prog() local
2240 rx_ring->xdp.prog = prog; in enetc_setup_xdp_prog()
2243 rx_ring->buffer_offset = XDP_PACKET_HEADROOM; in enetc_setup_xdp_prog()
2245 rx_ring->buffer_offset = ENETC_RXB_PAD; in enetc_setup_xdp_prog()
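enetc_setup_xdp_prog() (lines 2238-2245) picks the per-buffer headroom when a program is attached or removed: with XDP, each buffer reserves XDP_PACKET_HEADROOM (256 bytes in mainline) so programs can push headers, while the plain skb path only reserves the smaller ENETC_RXB_PAD. A trivial sketch of that choice; the default-pad value below is an assumption, not the driver's definition.

enum {
	MODEL_XDP_HEADROOM = 256,   /* XDP_PACKET_HEADROOM in mainline    */
	MODEL_DEFAULT_PAD  = 64,    /* assumed stand-in for ENETC_RXB_PAD */
};

/* Headroom selection sketch: mirrors the if/else at lines 2240-2245. */
static int pick_buffer_offset(bool xdp_prog_attached)
{
	return xdp_prog_attached ? MODEL_XDP_HEADROOM : MODEL_DEFAULT_PAD;
}
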
2274 packets += priv->rx_ring[i]->stats.packets; in enetc_get_stats()
2275 bytes += priv->rx_ring[i]->stats.bytes; in enetc_get_stats()
2490 bdr = &v->rx_ring; in enetc_alloc_msix()
2496 priv->rx_ring[i] = bdr; in enetc_alloc_msix()
2545 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_alloc_msix() local
2547 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); in enetc_alloc_msix()
2548 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); in enetc_alloc_msix()
2565 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_free_msix() local
2567 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); in enetc_free_msix()
2568 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); in enetc_free_msix()
2574 priv->rx_ring[i] = NULL; in enetc_free_msix()