Lines matching refs: rx_ring
112 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
506 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, in ixgbevf_process_skb_fields() argument
510 ixgbevf_rx_hash(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
511 ixgbevf_rx_checksum(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
515 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
522 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
524 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
528 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_get_rx_buffer() argument
533 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
537 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
548 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_put_rx_buffer() argument
554 ixgbevf_reuse_rx_page(rx_ring, rx_buffer); in ixgbevf_put_rx_buffer()
560 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbevf_put_rx_buffer()
561 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_put_rx_buffer()
582 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, in ixgbevf_is_non_eop() argument
585 u32 ntc = rx_ring->next_to_clean + 1; in ixgbevf_is_non_eop()
588 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbevf_is_non_eop()
589 rx_ring->next_to_clean = ntc; in ixgbevf_is_non_eop()
591 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); in ixgbevf_is_non_eop()
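
The ixgbevf_is_non_eop() lines above advance next_to_clean and wrap it back to zero at the end of the ring. A minimal standalone sketch of that index arithmetic, using illustrative names (struct rx_ring_model, ring_advance()) rather than driver API:

#include <stdio.h>

/* Illustrative stand-in for the ring indices used above. */
struct rx_ring_model {
    unsigned int count;          /* descriptors in the ring */
    unsigned int next_to_clean;  /* next descriptor to process */
};

/* Advance next_to_clean by one, wrapping to 0 at the end of the ring,
 * the same arithmetic as the ntc handling in ixgbevf_is_non_eop(). */
static unsigned int ring_advance(struct rx_ring_model *ring)
{
    unsigned int ntc = ring->next_to_clean + 1;

    ntc = (ntc < ring->count) ? ntc : 0;
    ring->next_to_clean = ntc;
    return ntc;
}

int main(void)
{
    struct rx_ring_model ring = { .count = 4, .next_to_clean = 2 };

    printf("%u\n", ring_advance(&ring)); /* 3 */
    printf("%u\n", ring_advance(&ring)); /* 0 (wrapped) */
    return 0;
}
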
599 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring) in ixgbevf_rx_offset() argument
601 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0; in ixgbevf_rx_offset()
604 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, in ixgbevf_alloc_mapped_page() argument
615 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring)); in ixgbevf_alloc_mapped_page()
617 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
622 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbevf_alloc_mapped_page()
623 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_alloc_mapped_page()
629 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbevf_alloc_mapped_page()
630 __free_pages(page, ixgbevf_rx_pg_order(rx_ring)); in ixgbevf_alloc_mapped_page()
632 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
638 bi->page_offset = ixgbevf_rx_offset(rx_ring); in ixgbevf_alloc_mapped_page()
640 rx_ring->rx_stats.alloc_rx_page++; in ixgbevf_alloc_mapped_page()
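
ixgbevf_alloc_mapped_page() above allocates a page, DMA-maps it, and on a mapping failure frees the page again and bumps alloc_rx_page_failed so the refill can be retried later. A hedged userspace model of that allocate-then-map-with-rollback shape, with malloc() standing in for page allocation and a fake mapping helper standing in for dma_map_page_attrs() (all names here are illustrative, not driver API):

#include <stdbool.h>
#include <stdlib.h>

/* Illustrative counters and buffer record; not driver structures. */
struct rx_page_stats { unsigned long alloc_failed, alloc_ok; };
struct rx_buf { void *page; unsigned long dma; };

/* Stand-in for DMA mapping that may fail (returns 0 on failure). */
static unsigned long fake_dma_map(void *page)
{
    return (unsigned long)page;
}

/* Allocate-then-map with rollback: a failed mapping frees the page
 * again and the failure is counted, so the caller can simply retry
 * on a later refill pass. */
static bool alloc_mapped_page_model(struct rx_page_stats *stats,
                                    struct rx_buf *bi)
{
    void *page = malloc(4096);
    unsigned long dma;

    if (!page) {
        stats->alloc_failed++;
        return false;
    }

    dma = fake_dma_map(page);
    if (!dma) {
        free(page);              /* roll back the page allocation */
        stats->alloc_failed++;
        return false;
    }

    bi->page = page;
    bi->dma = dma;
    stats->alloc_ok++;
    return true;
}

int main(void)
{
    struct rx_page_stats stats = { 0, 0 };
    struct rx_buf bi = { 0, 0 };
    int ret = alloc_mapped_page_model(&stats, &bi) ? 0 : 1;

    free(bi.page);
    return ret;
}
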
650 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, in ixgbevf_alloc_rx_buffers() argument
655 unsigned int i = rx_ring->next_to_use; in ixgbevf_alloc_rx_buffers()
658 if (!cleaned_count || !rx_ring->netdev) in ixgbevf_alloc_rx_buffers()
661 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); in ixgbevf_alloc_rx_buffers()
662 bi = &rx_ring->rx_buffer_info[i]; in ixgbevf_alloc_rx_buffers()
663 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
666 if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) in ixgbevf_alloc_rx_buffers()
670 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbevf_alloc_rx_buffers()
672 ixgbevf_rx_bufsz(rx_ring), in ixgbevf_alloc_rx_buffers()
684 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); in ixgbevf_alloc_rx_buffers()
685 bi = rx_ring->rx_buffer_info; in ixgbevf_alloc_rx_buffers()
686 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
695 i += rx_ring->count; in ixgbevf_alloc_rx_buffers()
697 if (rx_ring->next_to_use != i) { in ixgbevf_alloc_rx_buffers()
699 rx_ring->next_to_use = i; in ixgbevf_alloc_rx_buffers()
702 rx_ring->next_to_alloc = i; in ixgbevf_alloc_rx_buffers()
710 ixgbevf_write_tail(rx_ring, i); in ixgbevf_alloc_rx_buffers()
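
The ixgbevf_alloc_rx_buffers() lines above use an index biased by -count (the "i -= rx_ring->count" / "i += rx_ring->count" pair), so that i reaches zero exactly when the walk falls off the end of the ring and a cheap !i test detects the wrap; the tail register is only written when next_to_use actually moved. A standalone model of just that index trick (refill_model() is an illustrative name; the real loop allocates pages and writes descriptors where the comment sits):

#include <stdio.h>

/* Returns the new next_to_use after refilling cleaned_count slots of a
 * ring with `count` entries, starting at next_to_use, using the same
 * biased-index wrap detection as ixgbevf_alloc_rx_buffers(). */
static unsigned int refill_model(unsigned int count,
                                 unsigned int next_to_use,
                                 unsigned int cleaned_count)
{
    unsigned int i = next_to_use;

    if (!cleaned_count)
        return next_to_use;

    i -= count;                 /* bias: i is now "index - count" */

    do {
        /* real driver: allocate a page and fill the descriptor here */
        i++;
        if (!i)                 /* walked past the last entry: wrap */
            i -= count;
        cleaned_count--;
    } while (cleaned_count);

    i += count;                 /* undo the bias to get a real index */
    return i;
}

int main(void)
{
    /* ring of 8 entries, refilling 5 starting at index 6: fills
     * 6, 7, 0, 1, 2 and leaves next_to_use at 3 */
    printf("%u\n", refill_model(8, 6, 5));
    return 0;
}
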
732 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, in ixgbevf_cleanup_headers() argument
743 struct net_device *netdev = rx_ring->netdev; in ixgbevf_cleanup_headers()
765 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, in ixgbevf_reuse_rx_page() argument
769 u16 nta = rx_ring->next_to_alloc; in ixgbevf_reuse_rx_page()
771 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbevf_reuse_rx_page()
775 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbevf_reuse_rx_page()
832 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, in ixgbevf_add_rx_frag() argument
838 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_add_rx_frag()
840 unsigned int truesize = ring_uses_build_skb(rx_ring) ? in ixgbevf_add_rx_frag()
854 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring, in ixgbevf_construct_skb() argument
861 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_construct_skb()
888 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); in ixgbevf_construct_skb()
929 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, in ixgbevf_build_skb() argument
936 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_build_skb()
1054 struct ixgbevf_ring *rx_ring, in ixgbevf_run_xdp() argument
1063 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbevf_run_xdp()
1073 xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_run_xdp()
1080 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1091 static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring, in ixgbevf_rx_frame_truesize() argument
1097 truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbevf_rx_frame_truesize()
1099 truesize = ring_uses_build_skb(rx_ring) ? in ixgbevf_rx_frame_truesize()
1107 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring, in ixgbevf_rx_buffer_flip() argument
1111 unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size); in ixgbevf_rx_buffer_flip()
1121 struct ixgbevf_ring *rx_ring, in ixgbevf_clean_rx_irq() argument
1126 u16 cleaned_count = ixgbevf_desc_unused(rx_ring); in ixgbevf_clean_rx_irq()
1127 struct sk_buff *skb = rx_ring->skb; in ixgbevf_clean_rx_irq()
1131 xdp.rxq = &rx_ring->xdp_rxq; in ixgbevf_clean_rx_irq()
1135 xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0); in ixgbevf_clean_rx_irq()
1145 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbevf_clean_rx_irq()
1149 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbevf_clean_rx_irq()
1160 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size); in ixgbevf_clean_rx_irq()
1168 ixgbevf_rx_offset(rx_ring); in ixgbevf_clean_rx_irq()
1172 xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size); in ixgbevf_clean_rx_irq()
1174 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); in ixgbevf_clean_rx_irq()
1180 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1188 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size); in ixgbevf_clean_rx_irq()
1189 } else if (ring_uses_build_skb(rx_ring)) { in ixgbevf_clean_rx_irq()
1190 skb = ixgbevf_build_skb(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1193 skb = ixgbevf_construct_skb(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1199 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbevf_clean_rx_irq()
1204 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb); in ixgbevf_clean_rx_irq()
1208 if (ixgbevf_is_non_eop(rx_ring, rx_desc)) in ixgbevf_clean_rx_irq()
1212 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { in ixgbevf_clean_rx_irq()
1225 ether_addr_equal(rx_ring->netdev->dev_addr, in ixgbevf_clean_rx_irq()
1232 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); in ixgbevf_clean_rx_irq()
1244 rx_ring->skb = skb; in ixgbevf_clean_rx_irq()
1248 adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_clean_rx_irq()
1257 u64_stats_update_begin(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1258 rx_ring->stats.packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1259 rx_ring->stats.bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1260 u64_stats_update_end(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
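
ixgbevf_clean_rx_irq() above processes at most `budget` frames per poll, accumulates packet and byte counts in locals, and folds them into the ring statistics once, inside the u64_stats_update_begin()/end() pair. A standalone sketch of that loop shape only, with illustrative names and without the buffer, XDP and skb handling of the real function:

#include <stdbool.h>
#include <stdio.h>

struct rx_stats_model { unsigned long packets, bytes; };

/* Stand-in for "is there another completed descriptor, and how long is
 * the frame it describes?" */
static bool next_completed_frame(unsigned int *len)
{
    static const unsigned int frames[] = { 64, 1500, 9000, 0 };
    static unsigned int idx;

    if (!frames[idx])
        return false;
    *len = frames[idx++];
    return true;
}

/* Budget-bounded poll loop with a single statistics update at the end. */
static int clean_rx_irq_model(struct rx_stats_model *stats, int budget)
{
    unsigned int total_packets = 0, total_bytes = 0, len = 0;

    while (total_packets < (unsigned int)budget &&
           next_completed_frame(&len)) {
        /* real driver: build or extend an skb, run XDP, etc. */
        total_bytes += len;
        total_packets++;
    }

    /* fold the local totals into the ring stats once, not per frame */
    stats->packets += total_packets;
    stats->bytes += total_bytes;
    return total_packets;
}

int main(void)
{
    struct rx_stats_model stats = { 0, 0 };
    int cleaned = clean_rx_irq_model(&stats, 2);

    printf("cleaned %d frames, %lu bytes\n", cleaned, stats.bytes);
    return 0;
}
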
1980 struct ixgbevf_ring *rx_ring) in ixgbevf_set_rx_buffer_len() argument
1986 clear_ring_build_skb_enabled(rx_ring); in ixgbevf_set_rx_buffer_len()
1987 clear_ring_uses_large_buffer(rx_ring); in ixgbevf_set_rx_buffer_len()
1992 set_ring_build_skb_enabled(rx_ring); in ixgbevf_set_rx_buffer_len()
1998 set_ring_uses_large_buffer(rx_ring); in ixgbevf_set_rx_buffer_len()
2030 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_configure_rx() local
2032 ixgbevf_set_rx_buffer_len(adapter, rx_ring); in ixgbevf_configure_rx()
2033 ixgbevf_configure_rx_ring(adapter, rx_ring); in ixgbevf_configure_rx()
2340 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) in ixgbevf_clean_rx_ring() argument
2342 u16 i = rx_ring->next_to_clean; in ixgbevf_clean_rx_ring()
2345 if (rx_ring->skb) { in ixgbevf_clean_rx_ring()
2346 dev_kfree_skb(rx_ring->skb); in ixgbevf_clean_rx_ring()
2347 rx_ring->skb = NULL; in ixgbevf_clean_rx_ring()
2351 while (i != rx_ring->next_to_alloc) { in ixgbevf_clean_rx_ring()
2354 rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbevf_clean_rx_ring()
2359 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_clean_rx_ring()
2362 ixgbevf_rx_bufsz(rx_ring), in ixgbevf_clean_rx_ring()
2366 dma_unmap_page_attrs(rx_ring->dev, in ixgbevf_clean_rx_ring()
2368 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_clean_rx_ring()
2376 if (i == rx_ring->count) in ixgbevf_clean_rx_ring()
2380 rx_ring->next_to_alloc = 0; in ixgbevf_clean_rx_ring()
2381 rx_ring->next_to_clean = 0; in ixgbevf_clean_rx_ring()
2382 rx_ring->next_to_use = 0; in ixgbevf_clean_rx_ring()
2456 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); in ixgbevf_clean_all_rx_rings()
2485 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbevf_down()
2808 adapter->rx_ring[rxr_idx] = ring; in ixgbevf_alloc_q_vector()
2843 adapter->rx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
3137 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_update_stats() local
3139 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbevf_update_stats()
3140 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbevf_update_stats()
3141 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbevf_update_stats()
3142 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbevf_update_stats()
3473 struct ixgbevf_ring *rx_ring) in ixgbevf_setup_rx_resources() argument
3477 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; in ixgbevf_setup_rx_resources()
3478 rx_ring->rx_buffer_info = vmalloc(size); in ixgbevf_setup_rx_resources()
3479 if (!rx_ring->rx_buffer_info) in ixgbevf_setup_rx_resources()
3482 u64_stats_init(&rx_ring->syncp); in ixgbevf_setup_rx_resources()
3485 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbevf_setup_rx_resources()
3486 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbevf_setup_rx_resources()
3488 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, in ixgbevf_setup_rx_resources()
3489 &rx_ring->dma, GFP_KERNEL); in ixgbevf_setup_rx_resources()
3491 if (!rx_ring->desc) in ixgbevf_setup_rx_resources()
3495 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbevf_setup_rx_resources()
3496 rx_ring->queue_index) < 0) in ixgbevf_setup_rx_resources()
3499 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbevf_setup_rx_resources()
3503 vfree(rx_ring->rx_buffer_info); in ixgbevf_setup_rx_resources()
3504 rx_ring->rx_buffer_info = NULL; in ixgbevf_setup_rx_resources()
3505 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); in ixgbevf_setup_rx_resources()
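
ixgbevf_setup_rx_resources() above sizes the descriptor ring as count * sizeof(union ixgbe_adv_rx_desc) and rounds it up to a 4 KiB boundary before dma_alloc_coherent(). A standalone sketch of that sizing arithmetic; ALIGN_UP is a local stand-in for the kernel's ALIGN() macro, and the 16-byte advanced RX descriptor size is assumed here:

#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a; defined
 * locally so the example stands alone outside the kernel tree. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
    /* assumed 16-byte descriptors; 512 of them give exactly 8192
     * bytes, while e.g. 511 would also round up to 8192 */
    unsigned long desc_size = 16;
    unsigned long count = 512;
    unsigned long size = ALIGN_UP(count * desc_size, 4096);

    printf("ring bytes: %lu\n", size);
    return 0;
}
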
3524 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3535 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3545 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) in ixgbevf_free_rx_resources() argument
3547 ixgbevf_clean_rx_ring(rx_ring); in ixgbevf_free_rx_resources()
3549 rx_ring->xdp_prog = NULL; in ixgbevf_free_rx_resources()
3550 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbevf_free_rx_resources()
3551 vfree(rx_ring->rx_buffer_info); in ixgbevf_free_rx_resources()
3552 rx_ring->rx_buffer_info = NULL; in ixgbevf_free_rx_resources()
3554 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, in ixgbevf_free_rx_resources()
3555 rx_ring->dma); in ixgbevf_free_rx_resources()
3557 rx_ring->desc = NULL; in ixgbevf_free_rx_resources()
3571 if (adapter->rx_ring[i]->desc) in ixgbevf_free_all_rx_resources()
3572 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_free_all_rx_resources()
4375 ring = adapter->rx_ring[i]; in ixgbevf_get_stats()
4439 struct ixgbevf_ring *ring = adapter->rx_ring[i]; in ixgbevf_xdp_setup()
4463 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); in ixgbevf_xdp_setup()