Lines Matching refs:rx_ring

373 void ice_clean_rx_ring(struct ice_ring *rx_ring)  in ice_clean_rx_ring()  argument
375 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
379 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
382 if (rx_ring->skb) { in ice_clean_rx_ring()
383 dev_kfree_skb(rx_ring->skb); in ice_clean_rx_ring()
384 rx_ring->skb = NULL; in ice_clean_rx_ring()
387 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
388 ice_xsk_clean_rx_ring(rx_ring); in ice_clean_rx_ring()
393 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
394 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
404 rx_ring->rx_buf_len, in ice_clean_rx_ring()
408 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), in ice_clean_rx_ring()
417 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); in ice_clean_rx_ring()
420 memset(rx_ring->desc, 0, rx_ring->size); in ice_clean_rx_ring()
422 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
423 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
424 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
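
Read together, the ice_clean_rx_ring() matches show the teardown order: free any deferred skb, let the AF_XDP pool reclaim its own buffers, unmap every page the ring still owns, then zero the tracking array, the descriptor memory, and the three ring cursors. A condensed sketch of that flow; the sync call, the skip label, the page/pagecnt_bias fields and the ICE_RX_DMA_ATTR flag are filled in by assumption from the usual Intel Rx-buffer pattern, not from the matches above.

void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
    struct device *dev = rx_ring->dev;
    u16 i;

    if (!rx_ring->rx_buf)                   /* nothing was ever allocated */
        return;

    if (rx_ring->skb) {                     /* drop a half-assembled frame */
        dev_kfree_skb(rx_ring->skb);
        rx_ring->skb = NULL;
    }

    if (rx_ring->xsk_pool) {                /* AF_XDP buffers have their own path */
        ice_xsk_clean_rx_ring(rx_ring);
        goto rx_skip_free;
    }

    for (i = 0; i < rx_ring->count; i++) {
        struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

        if (!rx_buf->page)
            continue;

        /* invalidate anything the device may still have written */
        dma_sync_single_range_for_cpu(dev, rx_buf->dma, rx_buf->page_offset,
                                      rx_ring->rx_buf_len, DMA_FROM_DEVICE);
        /* release the mapping and our remaining page references */
        dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
        __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
        rx_buf->page = NULL;
    }

rx_skip_free:
    memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_alloc = 0;
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
}
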
433 void ice_free_rx_ring(struct ice_ring *rx_ring) in ice_free_rx_ring() argument
435 ice_clean_rx_ring(rx_ring); in ice_free_rx_ring()
436 if (rx_ring->vsi->type == ICE_VSI_PF) in ice_free_rx_ring()
437 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_free_rx_ring()
438 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ice_free_rx_ring()
439 rx_ring->xdp_prog = NULL; in ice_free_rx_ring()
440 devm_kfree(rx_ring->dev, rx_ring->rx_buf); in ice_free_rx_ring()
441 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
443 if (rx_ring->desc) { in ice_free_rx_ring()
444 dmam_free_coherent(rx_ring->dev, rx_ring->size, in ice_free_rx_ring()
445 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
446 rx_ring->desc = NULL; in ice_free_rx_ring()
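
ice_free_rx_ring() can be read almost line for line from the matches: clean the ring, drop the XDP registration that only PF rings hold, release the buffer-tracking array, then hand the coherent descriptor memory back. A sketch assembled from those lines:

void ice_free_rx_ring(struct ice_ring *rx_ring)
{
    ice_clean_rx_ring(rx_ring);

    /* only PF rings register with the XDP core */
    if (rx_ring->vsi->type == ICE_VSI_PF)
        if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
            xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

    rx_ring->xdp_prog = NULL;
    devm_kfree(rx_ring->dev, rx_ring->rx_buf);
    rx_ring->rx_buf = NULL;

    if (rx_ring->desc) {
        dmam_free_coherent(rx_ring->dev, rx_ring->size,
                           rx_ring->desc, rx_ring->dma);
        rx_ring->desc = NULL;
    }
}
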
456 int ice_setup_rx_ring(struct ice_ring *rx_ring) in ice_setup_rx_ring() argument
458 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
464 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
465 rx_ring->rx_buf = in ice_setup_rx_ring()
466 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, in ice_setup_rx_ring()
468 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
472 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
474 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, in ice_setup_rx_ring()
476 if (!rx_ring->desc) { in ice_setup_rx_ring()
478 rx_ring->size); in ice_setup_rx_ring()
482 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
483 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
485 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) in ice_setup_rx_ring()
486 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); in ice_setup_rx_ring()
488 if (rx_ring->vsi->type == ICE_VSI_PF && in ice_setup_rx_ring()
489 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_setup_rx_ring()
490 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, in ice_setup_rx_ring()
491 rx_ring->q_index, rx_ring->q_vector->napi.napi_id)) in ice_setup_rx_ring()
496 devm_kfree(dev, rx_ring->rx_buf); in ice_setup_rx_ring()
497 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
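
Setup is the mirror image: a zeroed rx_buf tracking array from devm_kzalloc(), a descriptor ring of ice_32byte_rx_desc entries in coherent DMA memory, reset cursors, and the XDP wiring (program pointer plus xdp_rxq_info registration) for PF VSIs. A sketch around the fragments; the GFP flags, the 4 KB alignment, the error message and the -ENOMEM returns are assumptions:

int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
    struct device *dev = rx_ring->dev;

    /* software tracking array, one entry per descriptor */
    WARN_ON(rx_ring->rx_buf);
    rx_ring->rx_buf = devm_kzalloc(dev,
                                   sizeof(*rx_ring->rx_buf) * rx_ring->count,
                                   GFP_KERNEL);
    if (!rx_ring->rx_buf)
        return -ENOMEM;

    /* the descriptor ring itself lives in coherent DMA memory */
    rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
                          4096);
    rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
                                        GFP_KERNEL);
    if (!rx_ring->desc) {
        dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
                rx_ring->size);
        goto err;
    }

    rx_ring->next_to_use = 0;
    rx_ring->next_to_clean = 0;

    if (ice_is_xdp_ena_vsi(rx_ring->vsi))
        WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

    /* register with the XDP core so redirect targets can find this queue */
    if (rx_ring->vsi->type == ICE_VSI_PF &&
        !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
                             rx_ring->q_index,
                             rx_ring->q_vector->napi.napi_id))
            goto err;

    return 0;

err:
    devm_kfree(dev, rx_ring->rx_buf);
    rx_ring->rx_buf = NULL;
    return -ENOMEM;
}
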
502 ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size) in ice_rx_frame_truesize() argument
507 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ice_rx_frame_truesize()
509 truesize = rx_ring->rx_offset ? in ice_rx_frame_truesize()
510 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + in ice_rx_frame_truesize()
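
The two truesize assignments above are the two halves of a page-size guard: on 4 KB pages every buffer is half a page (kept a power of two so the offset can simply be flipped when the page is reused), otherwise the buffer is sized to the frame plus headroom. A sketch; the preprocessor condition and the skb_shared_info term are assumptions:

static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
    unsigned int truesize;

#if (PAGE_SIZE < 8192)
    truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
    truesize = rx_ring->rx_offset ?
        SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
        SKB_DATA_ALIGN(size);
#endif
    return truesize;
}
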
526 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, in ice_run_xdp() argument
538 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; in ice_run_xdp()
544 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ice_run_xdp()
553 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
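
Only three lines of ice_run_xdp() reference rx_ring, but they mark the interesting verdicts: XDP_TX picks the per-CPU XDP Tx ring (one ring per CPU, so no lock is needed), XDP_REDIRECT goes through xdp_do_redirect(), and exceptions are reported with trace_xdp_exception(). The verdict switch around them below is assumed from the common XDP pattern; the ICE_XDP_* result codes and ice_xmit_xdp_buff() are driver names not shown in the matches:

static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
            struct bpf_prog *xdp_prog)
{
    struct ice_ring *xdp_ring;
    u32 act;
    int err;

    act = bpf_prog_run_xdp(xdp_prog, xdp);
    switch (act) {
    case XDP_PASS:
        return ICE_XDP_PASS;              /* fall back to the skb path */
    case XDP_TX:
        /* one XDP Tx ring per CPU, so no locking is required */
        xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
        return ice_xmit_xdp_buff(xdp, xdp_ring);
    case XDP_REDIRECT:
        err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
        return err ? ICE_XDP_CONSUMED : ICE_XDP_REDIR;
    default:
        bpf_warn_invalid_xdp_action(act);
        fallthrough;
    case XDP_ABORTED:
        trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
        fallthrough;
    case XDP_DROP:
        return ICE_XDP_CONSUMED;
    }
}
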
617 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) in ice_alloc_mapped_page() argument
627 page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); in ice_alloc_mapped_page()
629 rx_ring->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
634 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), in ice_alloc_mapped_page()
640 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
641 __free_pages(page, ice_rx_pg_order(rx_ring)); in ice_alloc_mapped_page()
642 rx_ring->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
648 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
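
Buffer allocation per the fragments: grab a page of the ring's configured order, DMA-map the whole page, record the mapping plus the ring's starting offset in the tracking entry, and count either failure in alloc_page_failed. A sketch; the early page-reuse return, the dma/page assignments, the pagecnt_bias seed and the ICE_RX_DMA_ATTR flag are assumptions:

static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
    struct page *page = bi->page;
    dma_addr_t dma;

    if (likely(page))                     /* slot already holds a mapped page */
        return true;

    page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
    if (unlikely(!page)) {
        rx_ring->rx_stats.alloc_page_failed++;
        return false;
    }

    /* map the full page; the device writes somewhere inside it */
    dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
    if (dma_mapping_error(rx_ring->dev, dma)) {
        __free_pages(page, ice_rx_pg_order(rx_ring));
        rx_ring->rx_stats.alloc_page_failed++;
        return false;
    }

    bi->dma = dma;
    bi->page = page;
    bi->page_offset = rx_ring->rx_offset; /* leave headroom for XDP if configured */
    bi->pagecnt_bias = 1;                 /* assumed bias-based refcounting */

    return true;
}
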
668 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) in ice_alloc_rx_bufs() argument
671 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
675 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || in ice_alloc_rx_bufs()
680 rx_desc = ICE_RX_DESC(rx_ring, ntu); in ice_alloc_rx_bufs()
681 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
685 if (!ice_alloc_mapped_page(rx_ring, bi)) in ice_alloc_rx_bufs()
689 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
691 rx_ring->rx_buf_len, in ice_alloc_rx_bufs()
702 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
703 rx_desc = ICE_RX_DESC(rx_ring, 0); in ice_alloc_rx_bufs()
704 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
714 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
715 ice_release_rx_desc(rx_ring, ntu); in ice_alloc_rx_bufs()
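
The refill loop walks next_to_use forward: allocate a mapped page for each empty slot, sync the buffer region back to the device, publish its DMA address in the descriptor, and wrap both cursors at rx_ring->count; the tail is only bumped through ice_release_rx_desc() when something was actually posted. A sketch; the descriptor field writes (pkt_addr, status_error0) and the return convention are assumptions:

bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
    union ice_32b_rx_flex_desc *rx_desc;
    u16 ntu = rx_ring->next_to_use;
    struct ice_rx_buf *bi;

    /* nothing to refill for rings with no consumer */
    if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
        !cleaned_count)
        return false;

    rx_desc = ICE_RX_DESC(rx_ring, ntu);
    bi = &rx_ring->rx_buf[ntu];

    do {
        if (!ice_alloc_mapped_page(rx_ring, bi))
            break;                        /* out of pages; retry next poll */

        /* hand the buffer region back to the device before publishing it */
        dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                         bi->page_offset,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);

        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

        rx_desc++;
        bi++;
        ntu++;
        if (unlikely(ntu == rx_ring->count)) {  /* wrap both cursors */
            rx_desc = ICE_RX_DESC(rx_ring, 0);
            bi = rx_ring->rx_buf;
            ntu = 0;
        }

        rx_desc->wb.status_error0 = 0;    /* clear the status of the next slot */
        cleaned_count--;
    } while (cleaned_count);

    if (rx_ring->next_to_use != ntu)
        ice_release_rx_desc(rx_ring, ntu);  /* writes the tail register */

    return !!cleaned_count;               /* true means the refill fell short */
}
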
797 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, in ice_add_rx_frag() argument
801 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); in ice_add_rx_frag()
803 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; in ice_add_rx_frag()
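
When a frame continues an existing skb, the page region is attached as a paged fragment instead of being copied; the two truesize lines above are again the large-page vs half-page split. A sketch, assuming skb_add_rx_frag() and an ice_rx_buf_adjust_pg_offset() helper (not in the matches) to advance the buffer offset:

static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
                struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
    unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
    unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

    if (!size)
        return;

    /* attach the page region to the skb without copying the payload */
    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
                    rx_buf->page_offset, size, truesize);

    /* the buffer has been consumed up to page_offset + truesize */
    ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}
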
823 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) in ice_reuse_rx_page() argument
825 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
828 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
832 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
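
Recycling: the still-mapped page of a just-consumed buffer is copied into the slot at next_to_alloc, and the cursor wraps at the ring size, which is what the ternary in the last match does (after an nta++ the listing omits). A sketch; the copied fields are the assumed payload of the hand-off:

static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
    u16 nta = rx_ring->next_to_alloc;
    struct ice_rx_buf *new_buf;

    new_buf = &rx_ring->rx_buf[nta];

    /* advance next_to_alloc, wrapping back to 0 at the end of the ring */
    nta++;
    rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

    /* hand the still-mapped page to the slot that will be refilled next */
    new_buf->dma = old_buf->dma;
    new_buf->page = old_buf->page;
    new_buf->page_offset = old_buf->page_offset;
    new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
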
854 ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size, in ice_get_rx_buf() argument
859 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; in ice_get_rx_buf()
871 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
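
Fetching the buffer at next_to_clean pairs with a dma_sync_single_range_for_cpu() over just the received bytes, so the CPU sees what the device wrote without tearing down the mapping. A short sketch; the page_count() snapshot and the pagecnt_bias bookkeeping are assumptions:

static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
               int *rx_buf_pgcnt)
{
    struct ice_rx_buf *rx_buf;

    rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
    *rx_buf_pgcnt = page_count(rx_buf->page);   /* for the later reuse check */

    /* make the region the device just wrote visible to the CPU */
    dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
                                  rx_buf->page_offset, size,
                                  DMA_FROM_DEVICE);

    /* one use of the page is now handed out to the stack */
    rx_buf->pagecnt_bias--;

    return rx_buf;
}
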
891 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, in ice_build_skb() argument
896 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; in ice_build_skb()
918 skb_record_rx_queue(skb, rx_ring->q_index); in ice_build_skb()
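
ice_build_skb() wraps an skb directly around the page half the hardware (and possibly XDP) already filled, so no payload copy happens; only the truesize and queue-record lines show up in the matches. A sketch assuming the standard build_skb() flow plus the same ice_rx_buf_adjust_pg_offset() helper:

static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
              struct xdp_buff *xdp)
{
    unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
    unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                            SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
#endif
    struct sk_buff *skb;

    /* build the skb around the existing page buffer, headroom included */
    skb = build_skb(xdp->data_hard_start, truesize);
    if (unlikely(!skb))
        return NULL;

    skb_record_rx_queue(skb, rx_ring->q_index);

    /* point the linear area at the payload XDP left behind */
    skb_reserve(skb, xdp->data - xdp->data_hard_start);
    __skb_put(skb, xdp->data_end - xdp->data);
    if (metasize)
        skb_metadata_set(skb, metasize);

    /* the page half is now owned by the skb */
    ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

    return skb;
}
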
943 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, in ice_construct_skb() argument
954 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, in ice_construct_skb()
959 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
975 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; in ice_construct_skb()
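
ice_construct_skb() is the copy path used when the ring does not build skbs in place: allocate a small linear skb from the q_vector's NAPI context, copy the headers out of the page, and attach any remaining payload as a fragment with the half-page truesize above. A sketch; eth_get_headlen(), the ALIGN'ed memcpy and the bias bookkeeping are assumptions beyond the listed lines:

static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
                  struct xdp_buff *xdp)
{
    unsigned int size = xdp->data_end - xdp->data;
    unsigned int headlen;
    struct sk_buff *skb;

    /* small linear skb; payload beyond the headers stays in the page */
    skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
                           GFP_ATOMIC | __GFP_NOWARN);
    if (unlikely(!skb))
        return NULL;

    skb_record_rx_queue(skb, rx_ring->q_index);

    /* copy only the protocol headers into the linear area */
    headlen = size;
    if (headlen > ICE_RX_HDR_SIZE)
        headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
    memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));

    size -= headlen;
    if (size) {
#if (PAGE_SIZE >= 8192)
        unsigned int truesize = SKB_DATA_ALIGN(size);
#else
        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
        /* rest of the frame rides along as a paged fragment */
        skb_add_rx_frag(skb, 0, rx_buf->page,
                        rx_buf->page_offset + headlen, size, truesize);
        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
    } else {
        /* whole frame fit in the linear area; the page was not consumed */
        rx_buf->pagecnt_bias++;
    }

    return skb;
}
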
1003 ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, in ice_put_rx_buf() argument
1006 u16 ntc = rx_ring->next_to_clean + 1; in ice_put_rx_buf()
1009 ntc = (ntc < rx_ring->count) ? ntc : 0; in ice_put_rx_buf()
1010 rx_ring->next_to_clean = ntc; in ice_put_rx_buf()
1017 ice_reuse_rx_page(rx_ring, rx_buf); in ice_put_rx_buf()
1020 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, in ice_put_rx_buf()
1021 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, in ice_put_rx_buf()
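
Once the frame data has been claimed, the buffer slot is retired: next_to_clean advances with the same wrap-at-count ternary, and the page is either recycled into the ring or unmapped and dropped. A sketch; ice_can_reuse_rx_page(), __page_frag_cache_drain() and the ICE_RX_DMA_ATTR flag are assumptions; the NULL rx_buf case matches the flow-director path seen later in ice_clean_rx_irq():

static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
               int rx_buf_pgcnt)
{
    u16 ntc = rx_ring->next_to_clean + 1;

    /* bump and wrap next_to_clean */
    ntc = (ntc < rx_ring->count) ? ntc : 0;
    rx_ring->next_to_clean = ntc;

    if (!rx_buf)                          /* descriptor-only completions */
        return;

    if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
        /* the page still has space or references left: recycle it */
        ice_reuse_rx_page(rx_ring, rx_buf);
    } else {
        /* the page is exhausted: release the mapping and our references */
        dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
                             ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
                             ICE_RX_DMA_ATTR);
        __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
    }

    /* clear the tracking entry; the buffer is no longer ours */
    rx_buf->page = NULL;
}
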
1039 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) in ice_is_non_eop() argument
1046 rx_ring->rx_stats.non_eop_descs++; in ice_is_non_eop()
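
A frame that spans several buffers keeps the ice_clean_rx_irq() loop going; the only rx_ring use here is the non_eop_descs counter. A sketch, assuming the flex-descriptor end-of-frame status bit and an ice_test_staterr() helper, neither of which appears in the matches:

static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
    /* all but the last descriptor of a multi-buffer frame have EOF clear */
    if (likely(ice_test_staterr(rx_desc,
                                BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))
        return false;

    rx_ring->rx_stats.non_eop_descs++;

    return true;
}
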
1063 int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) in ice_clean_rx_irq() argument
1066 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); in ice_clean_rx_irq()
1067 unsigned int offset = rx_ring->rx_offset; in ice_clean_rx_irq()
1069 struct sk_buff *skb = rx_ring->skb; in ice_clean_rx_irq()
1076 frame_sz = ice_rx_frame_truesize(rx_ring, 0); in ice_clean_rx_irq()
1078 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ice_clean_rx_irq()
1092 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ice_clean_rx_irq()
1109 ice_trace(clean_rx_irq, rx_ring, rx_desc); in ice_clean_rx_irq()
1110 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { in ice_clean_rx_irq()
1111 struct ice_vsi *ctrl_vsi = rx_ring->vsi; in ice_clean_rx_irq()
1116 ice_put_rx_buf(rx_ring, NULL, 0); in ice_clean_rx_irq()
1125 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); in ice_clean_rx_irq()
1140 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); in ice_clean_rx_irq()
1143 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ice_clean_rx_irq()
1147 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); in ice_clean_rx_irq()
1160 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); in ice_clean_rx_irq()
1164 ice_add_rx_frag(rx_ring, rx_buf, skb, size); in ice_clean_rx_irq()
1166 if (ice_ring_uses_build_skb(rx_ring)) in ice_clean_rx_irq()
1167 skb = ice_build_skb(rx_ring, rx_buf, &xdp); in ice_clean_rx_irq()
1169 skb = ice_construct_skb(rx_ring, rx_buf, &xdp); in ice_clean_rx_irq()
1173 rx_ring->rx_stats.alloc_buf_failed++; in ice_clean_rx_irq()
1179 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); in ice_clean_rx_irq()
1183 if (ice_is_non_eop(rx_ring, rx_desc)) in ice_clean_rx_irq()
1209 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); in ice_clean_rx_irq()
1211 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); in ice_clean_rx_irq()
1213 ice_receive_skb(rx_ring, skb, vlan_tag); in ice_clean_rx_irq()
1221 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); in ice_clean_rx_irq()
1224 ice_finalize_xdp_rx(rx_ring, xdp_xmit); in ice_clean_rx_irq()
1225 rx_ring->skb = skb; in ice_clean_rx_irq()
1227 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); in ice_clean_rx_irq()
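
The ice_clean_rx_irq() matches outline the whole NAPI Rx poll: pick up a carried-over skb, walk completed descriptors, divert flow-director and control-VSI completions, give XDP the first look, otherwise grow or create an skb, retire the buffer, and on end-of-packet push the frame up the stack; afterwards refill the ring, flush XDP transmits, stash any partial skb and update stats. A skeleton of that loop; the DD/packet-length descriptor fields, the XDP verdict accounting and the ptype/VLAN extraction are assumptions or are only sketched in comments:

int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
    unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
    u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
    struct sk_buff *skb = rx_ring->skb;   /* frame carried over from the last poll */
    struct bpf_prog *xdp_prog = NULL;
    unsigned int xdp_xmit = 0;
    struct xdp_buff xdp;
    bool failure;

    xdp_init_buff(&xdp, ice_rx_frame_truesize(rx_ring, 0), &rx_ring->xdp_rxq);

    while (likely(total_rx_pkts < (unsigned int)budget)) {
        u16 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
        union ice_32b_rx_flex_desc *rx_desc;
        struct ice_rx_buf *rx_buf;
        int rx_buf_pgcnt = 0;
        u16 vlan_tag = 0, rx_ptype = 0;
        unsigned int size;

        rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
        if (!ice_test_staterr(rx_desc, stat_err_bits))
            break;                        /* not written back yet */

        dma_rmb();                        /* read fields only after seeing DD */
        size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;

        ice_trace(clean_rx_irq, rx_ring, rx_desc);

        /* flow-director / control-VSI completions never reach the stack */
        if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
            /* elided: forward the completion to the control VSI */
            ice_put_rx_buf(rx_ring, NULL, 0);
            cleaned_count++;
            continue;
        }

        rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);

        /* XDP gets the first look at the frame */
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
        if (xdp_prog) {
            /* elided: point xdp at the buffer, run ice_run_xdp(), collect
             * TX/REDIRECT results in xdp_xmit, and on a consuming verdict
             * recycle the buffer and continue
             */
        }

        /* otherwise grow the carried skb or start a new one */
        if (skb)
            ice_add_rx_frag(rx_ring, rx_buf, skb, size);
        else if (ice_ring_uses_build_skb(rx_ring))
            skb = ice_build_skb(rx_ring, rx_buf, &xdp);
        else
            skb = ice_construct_skb(rx_ring, rx_buf, &xdp);

        if (!skb) {
            rx_ring->rx_stats.alloc_buf_failed++;
            break;
        }

        ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
        cleaned_count++;

        if (ice_is_non_eop(rx_ring, rx_desc))
            continue;                     /* frame continues in the next buffer */

        /* elided: error bits, ptype and VLAN extraction from the descriptor */
        total_rx_bytes += skb->len;
        ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
        ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
        ice_receive_skb(rx_ring, skb, vlan_tag);
        skb = NULL;
        total_rx_pkts++;
    }

    /* give the retired slots back to the hardware */
    failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

    if (xdp_prog)
        ice_finalize_xdp_rx(rx_ring, xdp_xmit);
    rx_ring->skb = skb;                   /* keep any partial frame for the next poll */

    ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

    /* returning the budget keeps NAPI polling after an allocation failure */
    return failure ? budget : (int)total_rx_pkts;
}
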