Lines matching refs: rx_ring — cross-reference listing for drivers/net/ethernet/intel/ixgbe/ixgbe_main.c. Each entry shows the source line number, the matching code, and the enclosing function; "local" and "argument" flag the lines where rx_ring is declared.

574 struct ixgbe_ring *rx_ring; in ixgbe_dump() local
713 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
715 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
770 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
772 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
783 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
786 if (i == rx_ring->next_to_use) in ixgbe_dump()
788 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
793 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
794 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_dump()
819 ixgbe_rx_bufsz(rx_ring), true); in ixgbe_dump()
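
The dump above walks every descriptor in the ring and tags the two software cursors: next_to_use (NTU), the next slot the driver will refill, and next_to_clean (NTC), the next slot it will reap. A minimal userspace sketch of that walk, with invented demo_* names rather than driver API:

    #include <stdio.h>

    struct demo_ring {
        unsigned int count;          /* number of descriptors */
        unsigned int next_to_use;    /* NTU: next slot software fills */
        unsigned int next_to_clean;  /* NTC: next slot software reaps */
    };

    static void demo_dump(const struct demo_ring *r)
    {
        for (unsigned int i = 0; i < r->count; i++) {
            const char *tag = "";

            if (i == r->next_to_use)
                tag = " NTU";
            else if (i == r->next_to_clean)
                tag = " NTC";
            printf("desc %3u%s\n", i, tag);
        }
    }

    int main(void)
    {
        struct demo_ring r = { .count = 8, .next_to_use = 5, .next_to_clean = 2 };

        demo_dump(&r);
        return 0;
    }
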
1310 struct ixgbe_ring *rx_ring, in ixgbe_update_rx_dca() argument
1315 u8 reg_idx = rx_ring->reg_idx; in ixgbe_update_rx_dca()
1318 rxctrl = dca3_get_tag(rx_ring->dev, cpu); in ixgbe_update_rx_dca()
1523 static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) in ixgbe_rx_offset() argument
1525 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0; in ixgbe_rx_offset()
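
ixgbe_rx_offset() returns a nonzero headroom only for rings that build skbs directly in the receive page; legacy rings place packet data at offset 0. A toy model of that choice (DEMO_SKB_PAD is a placeholder; the real IXGBE_SKB_PAD value depends on kernel padding and XDP headroom constants):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_SKB_PAD 64   /* placeholder headroom, not the real constant */

    static unsigned int demo_rx_offset(bool uses_build_skb)
    {
        return uses_build_skb ? DEMO_SKB_PAD : 0;
    }

    int main(void)
    {
        printf("build_skb offset: %u\n", demo_rx_offset(true));
        printf("legacy offset:    %u\n", demo_rx_offset(false));
        return 0;
    }
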
1528 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, in ixgbe_alloc_mapped_page() argument
1539 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1541 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1546 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbe_alloc_mapped_page()
1547 ixgbe_rx_pg_size(rx_ring), in ixgbe_alloc_mapped_page()
1555 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbe_alloc_mapped_page()
1556 __free_pages(page, ixgbe_rx_pg_order(rx_ring)); in ixgbe_alloc_mapped_page()
1558 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbe_alloc_mapped_page()
1564 bi->page_offset = rx_ring->rx_offset; in ixgbe_alloc_mapped_page()
1567 rx_ring->rx_stats.alloc_rx_page++; in ixgbe_alloc_mapped_page()
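
ixgbe_alloc_mapped_page() follows the usual allocate-then-map pattern: get pages, DMA-map them, and on a mapping error free the pages before bumping the failure counter, so nothing leaks. A userspace sketch of that unwind ordering (demo_map() stands in for dma_map_page_attrs(); everything here is illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_stats { unsigned long alloc_page_failed; };

    /* stands in for dma_map_page_attrs(); pretend the mapping can fail */
    static bool demo_map(void *page) { return page != NULL; }

    static void *demo_alloc_mapped_page(struct demo_stats *stats, size_t pg_size)
    {
        void *page = malloc(pg_size);    /* stands in for dev_alloc_pages() */

        if (!page) {
            stats->alloc_page_failed++;
            return NULL;
        }
        if (!demo_map(page)) {
            free(page);                  /* unwind: release the page first */
            stats->alloc_page_failed++;  /* then record the failure */
            return NULL;
        }
        return page;
    }

    int main(void)
    {
        struct demo_stats stats = { 0 };
        void *p = demo_alloc_mapped_page(&stats, 4096);

        printf("page %s, failures %lu\n", p ? "ok" : "failed",
               stats.alloc_page_failed);
        free(p);
        return 0;
    }
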
1577 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) in ixgbe_alloc_rx_buffers() argument
1581 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers()
1588 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_alloc_rx_buffers()
1589 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers()
1590 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1592 bufsz = ixgbe_rx_bufsz(rx_ring); in ixgbe_alloc_rx_buffers()
1595 if (!ixgbe_alloc_mapped_page(rx_ring, bi)) in ixgbe_alloc_rx_buffers()
1599 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbe_alloc_rx_buffers()
1613 rx_desc = IXGBE_RX_DESC(rx_ring, 0); in ixgbe_alloc_rx_buffers()
1614 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers()
1615 i -= rx_ring->count; in ixgbe_alloc_rx_buffers()
1624 i += rx_ring->count; in ixgbe_alloc_rx_buffers()
1626 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers()
1627 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers()
1630 rx_ring->next_to_alloc = i; in ixgbe_alloc_rx_buffers()
1638 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers()
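
The refill loop in ixgbe_alloc_rx_buffers() uses a biased index: i starts at next_to_use minus count, so the wrap test is a cheap "i reached 0" instead of a modulo per descriptor, and the final writel(i, rx_ring->tail) publishes the new producer index to hardware. A runnable model of the biased-index arithmetic (demo names only):

    #include <stdio.h>

    static void demo_refill(unsigned int count, unsigned int next_to_use,
                            unsigned int cleaned)
    {
        int i = (int)next_to_use - (int)count;  /* biased index, always < 0 */

        while (cleaned--) {
            printf("fill slot %d\n", i + (int)count);
            i++;
            if (!i)                 /* hit the end of the ring: wrap */
                i -= (int)count;
        }
        printf("new next_to_use = %d\n", i + (int)count);
    }

    int main(void)
    {
        demo_refill(8, 6, 5);       /* wraps from slot 7 back to slot 0 */
        return 0;
    }
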
1653 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, in ixgbe_update_rsc_stats() argument
1660 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; in ixgbe_update_rsc_stats()
1661 rx_ring->rx_stats.rsc_flush++; in ixgbe_update_rsc_stats()
1663 ixgbe_set_rsc_gso_size(rx_ring, skb); in ixgbe_update_rsc_stats()
1679 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, in ixgbe_process_skb_fields() argument
1683 struct net_device *dev = rx_ring->netdev; in ixgbe_process_skb_fields()
1684 u32 flags = rx_ring->q_vector->adapter->flags; in ixgbe_process_skb_fields()
1686 ixgbe_update_rsc_stats(rx_ring, skb); in ixgbe_process_skb_fields()
1688 ixgbe_rx_hash(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1690 ixgbe_rx_checksum(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1693 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1702 ixgbe_ipsec_rx(rx_ring, rx_desc, skb); in ixgbe_process_skb_fields()
1706 skb_record_rx_queue(skb, rx_ring->queue_index); in ixgbe_process_skb_fields()
1731 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, in ixgbe_is_non_eop() argument
1735 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_is_non_eop()
1738 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_is_non_eop()
1739 rx_ring->next_to_clean = ntc; in ixgbe_is_non_eop()
1741 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); in ixgbe_is_non_eop()
1744 if (ring_is_rsc_enabled(rx_ring)) { in ixgbe_is_non_eop()
1766 rx_ring->rx_buffer_info[ntc].skb = skb; in ixgbe_is_non_eop()
1767 rx_ring->rx_stats.non_eop_descs++; in ixgbe_is_non_eop()
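
ixgbe_is_non_eop() advances next_to_clean with a compare-based wrap, and when the descriptor is not end-of-packet it parks the in-progress skb in rx_buffer_info[ntc].skb so the next iteration keeps appending to the same frame. The wrap itself in isolation:

    #include <stdio.h>

    static unsigned int demo_advance(unsigned int ntc, unsigned int count)
    {
        ntc++;
        return (ntc < count) ? ntc : 0;  /* compare instead of modulo */
    }

    int main(void)
    {
        unsigned int ntc = 5;

        for (int i = 0; i < 6; i++) {
            ntc = demo_advance(ntc, 8);
            printf("next_to_clean = %u\n", ntc);
        }
        return 0;
    }
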
1784 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, in ixgbe_pull_tail() argument
1824 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, in ixgbe_dma_sync_frag() argument
1827 if (ring_uses_build_skb(rx_ring)) { in ixgbe_dma_sync_frag()
1828 unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; in ixgbe_dma_sync_frag()
1831 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1839 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_dma_sync_frag()
1848 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, in ixgbe_dma_sync_frag()
1849 ixgbe_rx_pg_size(rx_ring), in ixgbe_dma_sync_frag()
1877 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, in ixgbe_cleanup_headers() argument
1881 struct net_device *netdev = rx_ring->netdev; in ixgbe_cleanup_headers()
1900 ixgbe_pull_tail(rx_ring, skb); in ixgbe_cleanup_headers()
1904 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) in ixgbe_cleanup_headers()
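
ixgbe_pull_tail(), called from ixgbe_cleanup_headers(), copies a small header region out of the shared receive page into the skb's linear area so the stack can parse headers cheaply while the payload stays in the page fragment. A toy illustration of that split (plain buffers standing in for the page and the skb):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char page_frag[256];    /* shared receive page */
        unsigned char linear[64];        /* skb linear/header area */
        size_t pull = 32;                /* roughly the L2-L4 headers */

        memset(page_frag, 0xab, sizeof(page_frag));
        memcpy(linear, page_frag, pull); /* headers move to the linear area */
        printf("pulled %zu header bytes; payload stays in the frag\n", pull);
        return 0;
    }
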
1922 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, in ixgbe_reuse_rx_page() argument
1926 u16 nta = rx_ring->next_to_alloc; in ixgbe_reuse_rx_page()
1928 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbe_reuse_rx_page()
1932 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbe_reuse_rx_page()
1997 static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, in ixgbe_add_rx_frag() argument
2003 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_add_rx_frag()
2005 unsigned int truesize = rx_ring->rx_offset ? in ixgbe_add_rx_frag()
2006 SKB_DATA_ALIGN(rx_ring->rx_offset + size) : in ixgbe_add_rx_frag()
2018 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_get_rx_buffer() argument
2026 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_get_rx_buffer()
2045 ixgbe_dma_sync_frag(rx_ring, *skb); in ixgbe_get_rx_buffer()
2049 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_get_rx_buffer()
2060 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, in ixgbe_put_rx_buffer() argument
2067 ixgbe_reuse_rx_page(rx_ring, rx_buffer); in ixgbe_put_rx_buffer()
2074 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_put_rx_buffer()
2075 ixgbe_rx_pg_size(rx_ring), in ixgbe_put_rx_buffer()
2088 static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, in ixgbe_construct_skb() argument
2095 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_construct_skb()
2122 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); in ixgbe_construct_skb()
2147 static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, in ixgbe_build_skb() argument
2154 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; in ixgbe_build_skb()
2195 struct ixgbe_ring *rx_ring, in ixgbe_run_xdp() argument
2203 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp()
2233 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp()
2243 static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring, in ixgbe_rx_frame_truesize() argument
2249 truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbe_rx_frame_truesize()
2251 truesize = rx_ring->rx_offset ? in ixgbe_rx_frame_truesize()
2252 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + in ixgbe_rx_frame_truesize()
2259 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, in ixgbe_rx_buffer_flip() argument
2263 unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size); in ixgbe_rx_buffer_flip()
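
When the page is split into power-of-two halves (truesize == page size / 2), flipping the buffer is a single XOR of the offset, so each page serves two buffers in alternation before it must be recycled or unmapped; with larger pages the driver adds truesize instead. A runnable model of the XOR flip:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int pg_size = 4096;
        const unsigned int truesize = pg_size / 2;  /* must be a power of two */
        unsigned int offset = 0;

        for (int i = 0; i < 4; i++) {
            printf("use half at offset %u\n", offset);
            offset ^= truesize;                     /* flip between halves */
        }
        return 0;
    }
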
2285 struct ixgbe_ring *rx_ring, in ixgbe_clean_rx_irq() argument
2294 u16 cleaned_count = ixgbe_desc_unused(rx_ring); in ixgbe_clean_rx_irq()
2295 unsigned int offset = rx_ring->rx_offset; in ixgbe_clean_rx_irq()
2301 frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0); in ixgbe_clean_rx_irq()
2303 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbe_clean_rx_irq()
2314 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbe_clean_rx_irq()
2318 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq()
2329 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt); in ixgbe_clean_rx_irq()
2340 xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); in ixgbe_clean_rx_irq()
2342 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); in ixgbe_clean_rx_irq()
2350 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); in ixgbe_clean_rx_irq()
2357 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); in ixgbe_clean_rx_irq()
2358 } else if (ring_uses_build_skb(rx_ring)) { in ixgbe_clean_rx_irq()
2359 skb = ixgbe_build_skb(rx_ring, rx_buffer, in ixgbe_clean_rx_irq()
2362 skb = ixgbe_construct_skb(rx_ring, rx_buffer, in ixgbe_clean_rx_irq()
2368 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_clean_rx_irq()
2373 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); in ixgbe_clean_rx_irq()
2377 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2381 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) in ixgbe_clean_rx_irq()
2388 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); in ixgbe_clean_rx_irq()
2392 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { in ixgbe_clean_rx_irq()
2397 mss = rx_ring->netdev->mtu - in ixgbe_clean_rx_irq()
2434 u64_stats_update_begin(&rx_ring->syncp); in ixgbe_clean_rx_irq()
2435 rx_ring->stats.packets += total_rx_packets; in ixgbe_clean_rx_irq()
2436 rx_ring->stats.bytes += total_rx_bytes; in ixgbe_clean_rx_irq()
2437 u64_stats_update_end(&rx_ring->syncp); in ixgbe_clean_rx_irq()
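
Taken together, ixgbe_clean_rx_irq() is a budget-bounded loop: refill once enough descriptors have been consumed (the driver batches refills; treat the batch size below as an assumption standing in for IXGBE_RX_BUFFER_WRITE), fetch a descriptor, run XDP, build or extend the skb, and finally publish the totals once under the u64_stats seqcount. A skeleton model of that control flow:

    #include <stdio.h>

    #define DEMO_REFILL_BATCH 16   /* stands in for IXGBE_RX_BUFFER_WRITE */

    int main(void)
    {
        unsigned int budget = 64, cleaned_count = 20;
        unsigned long total_packets = 0, total_bytes = 0;

        while (total_packets < budget) {
            if (cleaned_count >= DEMO_REFILL_BATCH) {
                /* ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); */
                cleaned_count = 0;
            }
            /* fetch descriptor, run XDP, build/extend skb, non-EOP ... */
            total_bytes += 1500;   /* pretend every frame is 1500 bytes */
            total_packets++;
            cleaned_count++;
        }
        /* u64_stats_update_begin(&syncp); add totals; u64_stats_update_end() */
        printf("%lu packets, %lu bytes\n", total_packets, total_bytes);
        return 0;
    }
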
3684 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3687 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3694 struct ixgbe_ring *rx_ring) in ixgbe_configure_srrctl() argument
3698 u8 reg_idx = rx_ring->reg_idx; in ixgbe_configure_srrctl()
3714 if (rx_ring->xsk_pool) { in ixgbe_configure_srrctl()
3715 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool); in ixgbe_configure_srrctl()
3729 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { in ixgbe_configure_srrctl()
4252 struct ixgbe_ring *rx_ring; in ixgbe_set_rx_buffer_len() local
4286 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
4288 clear_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
4289 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4290 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4293 set_ring_rsc_enabled(rx_ring); in ixgbe_set_rx_buffer_len()
4295 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) in ixgbe_set_rx_buffer_len()
4296 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4301 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4305 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4309 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); in ixgbe_set_rx_buffer_len()
4396 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
4504 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
4542 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
5273 queue = adapter->rx_ring[ring]->reg_idx; in ixgbe_fdir_filter_restore()
5290 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) in ixgbe_clean_rx_ring() argument
5292 u16 i = rx_ring->next_to_clean; in ixgbe_clean_rx_ring()
5293 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbe_clean_rx_ring()
5295 if (rx_ring->xsk_pool) { in ixgbe_clean_rx_ring()
5296 ixgbe_xsk_clean_rx_ring(rx_ring); in ixgbe_clean_rx_ring()
5301 while (i != rx_ring->next_to_alloc) { in ixgbe_clean_rx_ring()
5305 dma_unmap_page_attrs(rx_ring->dev, in ixgbe_clean_rx_ring()
5307 ixgbe_rx_pg_size(rx_ring), in ixgbe_clean_rx_ring()
5316 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_clean_rx_ring()
5319 ixgbe_rx_bufsz(rx_ring), in ixgbe_clean_rx_ring()
5323 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbe_clean_rx_ring()
5324 ixgbe_rx_pg_size(rx_ring), in ixgbe_clean_rx_ring()
5332 if (i == rx_ring->count) { in ixgbe_clean_rx_ring()
5334 rx_buffer = rx_ring->rx_buffer_info; in ixgbe_clean_rx_ring()
5339 rx_ring->next_to_alloc = 0; in ixgbe_clean_rx_ring()
5340 rx_ring->next_to_clean = 0; in ixgbe_clean_rx_ring()
5341 rx_ring->next_to_use = 0; in ixgbe_clean_rx_ring()
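
ixgbe_clean_rx_ring() frees exactly the occupied span: it walks from next_to_clean up to next_to_alloc, wrapping at count, syncing and unmapping each buffer on the way, then zeroes all three cursors. A model of that walk:

    #include <stdio.h>

    int main(void)
    {
        unsigned int count = 8, i = 6 /* next_to_clean */, next_to_alloc = 3;

        while (i != next_to_alloc) {
            printf("free buffer %u\n", i);  /* sync, unmap, release page */
            i++;
            if (i == count)
                i = 0;                      /* wraps at most once */
        }
        /* next_to_alloc = next_to_clean = next_to_use = 0; */
        return 0;
    }
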
5366 adapter->rx_ring[baseq + i]->netdev = vdev; in ixgbe_fwd_ring_up()
5385 adapter->rx_ring[baseq + i]->netdev = NULL; in ixgbe_fwd_ring_up()
5765 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
5808 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_disable_rx()
6062 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
6538 static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring) in ixgbe_rx_napi_id() argument
6540 struct ixgbe_q_vector *q_vector = rx_ring->q_vector; in ixgbe_rx_napi_id()
6553 struct ixgbe_ring *rx_ring) in ixgbe_setup_rx_resources() argument
6555 struct device *dev = rx_ring->dev; in ixgbe_setup_rx_resources()
6560 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; in ixgbe_setup_rx_resources()
6562 if (rx_ring->q_vector) in ixgbe_setup_rx_resources()
6563 ring_node = rx_ring->q_vector->numa_node; in ixgbe_setup_rx_resources()
6565 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); in ixgbe_setup_rx_resources()
6566 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6567 rx_ring->rx_buffer_info = vmalloc(size); in ixgbe_setup_rx_resources()
6568 if (!rx_ring->rx_buffer_info) in ixgbe_setup_rx_resources()
6572 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbe_setup_rx_resources()
6573 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbe_setup_rx_resources()
6576 rx_ring->desc = dma_alloc_coherent(dev, in ixgbe_setup_rx_resources()
6577 rx_ring->size, in ixgbe_setup_rx_resources()
6578 &rx_ring->dma, in ixgbe_setup_rx_resources()
6581 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6582 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in ixgbe_setup_rx_resources()
6583 &rx_ring->dma, GFP_KERNEL); in ixgbe_setup_rx_resources()
6584 if (!rx_ring->desc) in ixgbe_setup_rx_resources()
6587 rx_ring->next_to_clean = 0; in ixgbe_setup_rx_resources()
6588 rx_ring->next_to_use = 0; in ixgbe_setup_rx_resources()
6591 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbe_setup_rx_resources()
6592 rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) in ixgbe_setup_rx_resources()
6595 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbe_setup_rx_resources()
6599 vfree(rx_ring->rx_buffer_info); in ixgbe_setup_rx_resources()
6600 rx_ring->rx_buffer_info = NULL; in ixgbe_setup_rx_resources()
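
The sizing in ixgbe_setup_rx_resources() is count descriptors times the descriptor size, rounded up to a 4 KiB boundary before the coherent DMA allocation; the buffer_info array is first tried NUMA-local (vmalloc_node) with a fallback to plain vmalloc. The rounding, assuming the 16-byte ixgbe_adv_rx_desc:

    #include <stdio.h>

    #define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long desc_sz = 16;   /* sizeof(union ixgbe_adv_rx_desc) */
        unsigned long count = 512;
        unsigned long size = DEMO_ALIGN(count * desc_sz, 4096UL);

        printf("descriptor ring bytes: %lu\n", size);  /* 8192 */
        return 0;
    }
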
6620 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6636 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
6687 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) in ixgbe_free_rx_resources() argument
6689 ixgbe_clean_rx_ring(rx_ring); in ixgbe_free_rx_resources()
6691 rx_ring->xdp_prog = NULL; in ixgbe_free_rx_resources()
6692 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbe_free_rx_resources()
6693 vfree(rx_ring->rx_buffer_info); in ixgbe_free_rx_resources()
6694 rx_ring->rx_buffer_info = NULL; in ixgbe_free_rx_resources()
6697 if (!rx_ring->desc) in ixgbe_free_rx_resources()
6700 dma_free_coherent(rx_ring->dev, rx_ring->size, in ixgbe_free_rx_resources()
6701 rx_ring->desc, rx_ring->dma); in ixgbe_free_rx_resources()
6703 rx_ring->desc = NULL; in ixgbe_free_rx_resources()
6721 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
6722 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
6742 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_change_mtu()
7049 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
7050 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
7057 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_update_stats() local
7059 if (!rx_ring) in ixgbe_update_stats()
7061 non_eop_descs += rx_ring->rx_stats.non_eop_descs; in ixgbe_update_stats()
7062 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbe_update_stats()
7063 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbe_update_stats()
7064 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbe_update_stats()
7065 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbe_update_stats()
7066 bytes += rx_ring->stats.bytes; in ixgbe_update_stats()
7067 packets += rx_ring->stats.packets; in ixgbe_update_stats()
8932 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
9267 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; in get_macvlan_queue()
10048 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; in ixgbe_fwd_del()
10125 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_xdp_setup()
10155 (void)xchg(&adapter->rx_ring[i]->xdp_prog, in ixgbe_xdp_setup()
10324 struct ixgbe_ring *rx_ring) in ixgbe_disable_rxr_hw() argument
10328 u8 reg_idx = rx_ring->reg_idx; in ixgbe_disable_rxr_hw()
10368 static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) in ixgbe_reset_rxr_stats() argument
10370 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); in ixgbe_reset_rxr_stats()
10371 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); in ixgbe_reset_rxr_stats()
10384 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; in ixgbe_txrx_ring_disable() local
10386 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_disable()
10393 ixgbe_disable_rxr_hw(adapter, rx_ring); in ixgbe_txrx_ring_disable()
10399 napi_disable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_disable()
10404 ixgbe_clean_rx_ring(rx_ring); in ixgbe_txrx_ring_disable()
10409 ixgbe_reset_rxr_stats(rx_ring); in ixgbe_txrx_ring_disable()
10422 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; in ixgbe_txrx_ring_enable() local
10424 rx_ring = adapter->rx_ring[ring]; in ixgbe_txrx_ring_enable()
10429 napi_enable(&rx_ring->q_vector->napi); in ixgbe_txrx_ring_enable()
10434 ixgbe_configure_rx_ring(adapter, rx_ring); in ixgbe_txrx_ring_enable()
10933 u64_stats_init(&adapter->rx_ring[i]->syncp); in ixgbe_probe()
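
The u64_stats_init() call in ixgbe_probe() seeds the per-ring seqcount that the update_begin/update_end pairs above rely on: on 32-bit machines a reader retries until it observes a consistent 64-bit packets/bytes pair. A single-threaded userspace model of the idea (real kernel code uses the u64_stats helpers and proper barriers, not a bare counter):

    #include <stdio.h>

    struct demo_stats {
        unsigned long long packets, bytes;
        unsigned int seq;                  /* odd while a writer is active */
    };

    static void demo_update(struct demo_stats *s, unsigned long long len)
    {
        s->seq++;                          /* begin: seq becomes odd */
        s->packets++;
        s->bytes += len;
        s->seq++;                          /* end: seq even again */
    }

    static void demo_fetch(const struct demo_stats *s,
                           unsigned long long *p, unsigned long long *b)
    {
        unsigned int start;

        do {                               /* retry across concurrent writes */
            start = s->seq;
            *p = s->packets;
            *b = s->bytes;
        } while ((start & 1) || start != s->seq);
    }

    int main(void)
    {
        struct demo_stats s = { 0 };
        unsigned long long p, b;

        demo_update(&s, 1500);
        demo_fetch(&s, &p, &b);
        printf("%llu packets, %llu bytes\n", p, b);
        return 0;
    }
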