Lines matching refs: rx_ring
112 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
506 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, in ixgbevf_process_skb_fields() argument
510 ixgbevf_rx_hash(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
511 ixgbevf_rx_checksum(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
515 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
522 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
524 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
528 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_get_rx_buffer() argument
533 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
537 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
548 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_put_rx_buffer() argument
554 ixgbevf_reuse_rx_page(rx_ring, rx_buffer); in ixgbevf_put_rx_buffer()
560 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbevf_put_rx_buffer()
561 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_put_rx_buffer()
582 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, in ixgbevf_is_non_eop() argument
585 u32 ntc = rx_ring->next_to_clean + 1; in ixgbevf_is_non_eop()
588 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbevf_is_non_eop()
589 rx_ring->next_to_clean = ntc; in ixgbevf_is_non_eop()
591 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); in ixgbevf_is_non_eop()
599 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring) in ixgbevf_rx_offset() argument
601 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0; in ixgbevf_rx_offset()
604 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, in ixgbevf_alloc_mapped_page() argument
615 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring)); in ixgbevf_alloc_mapped_page()
617 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
622 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbevf_alloc_mapped_page()
623 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_alloc_mapped_page()
629 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbevf_alloc_mapped_page()
630 __free_pages(page, ixgbevf_rx_pg_order(rx_ring)); in ixgbevf_alloc_mapped_page()
632 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
638 bi->page_offset = ixgbevf_rx_offset(rx_ring); in ixgbevf_alloc_mapped_page()
640 rx_ring->rx_stats.alloc_rx_page++; in ixgbevf_alloc_mapped_page()
650 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, in ixgbevf_alloc_rx_buffers() argument
655 unsigned int i = rx_ring->next_to_use; in ixgbevf_alloc_rx_buffers()
658 if (!cleaned_count || !rx_ring->netdev) in ixgbevf_alloc_rx_buffers()
661 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); in ixgbevf_alloc_rx_buffers()
662 bi = &rx_ring->rx_buffer_info[i]; in ixgbevf_alloc_rx_buffers()
663 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
666 if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) in ixgbevf_alloc_rx_buffers()
670 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbevf_alloc_rx_buffers()
672 ixgbevf_rx_bufsz(rx_ring), in ixgbevf_alloc_rx_buffers()
684 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); in ixgbevf_alloc_rx_buffers()
685 bi = rx_ring->rx_buffer_info; in ixgbevf_alloc_rx_buffers()
686 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
695 i += rx_ring->count; in ixgbevf_alloc_rx_buffers()
697 if (rx_ring->next_to_use != i) { in ixgbevf_alloc_rx_buffers()
699 rx_ring->next_to_use = i; in ixgbevf_alloc_rx_buffers()
702 rx_ring->next_to_alloc = i; in ixgbevf_alloc_rx_buffers()
710 ixgbevf_write_tail(rx_ring, i); in ixgbevf_alloc_rx_buffers()
732 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, in ixgbevf_cleanup_headers() argument
743 struct net_device *netdev = rx_ring->netdev; in ixgbevf_cleanup_headers()
765 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, in ixgbevf_reuse_rx_page() argument
769 u16 nta = rx_ring->next_to_alloc; in ixgbevf_reuse_rx_page()
771 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbevf_reuse_rx_page()
775 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbevf_reuse_rx_page()
827 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, in ixgbevf_add_rx_frag() argument
833 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_add_rx_frag()
835 unsigned int truesize = ring_uses_build_skb(rx_ring) ? in ixgbevf_add_rx_frag()
849 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring, in ixgbevf_construct_skb() argument
856 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_construct_skb()
883 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); in ixgbevf_construct_skb()
924 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, in ixgbevf_build_skb() argument
931 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2; in ixgbevf_build_skb()
1049 struct ixgbevf_ring *rx_ring, in ixgbevf_run_xdp() argument
1057 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbevf_run_xdp()
1067 xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_run_xdp()
1077 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1087 static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring, in ixgbevf_rx_frame_truesize() argument
1093 truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbevf_rx_frame_truesize()
1095 truesize = ring_uses_build_skb(rx_ring) ? in ixgbevf_rx_frame_truesize()
1103 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring, in ixgbevf_rx_buffer_flip() argument
1107 unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size); in ixgbevf_rx_buffer_flip()
1117 struct ixgbevf_ring *rx_ring, in ixgbevf_clean_rx_irq() argument
1122 u16 cleaned_count = ixgbevf_desc_unused(rx_ring); in ixgbevf_clean_rx_irq()
1123 struct sk_buff *skb = rx_ring->skb; in ixgbevf_clean_rx_irq()
1129 frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0); in ixgbevf_clean_rx_irq()
1131 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbevf_clean_rx_irq()
1140 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); in ixgbevf_clean_rx_irq()
1144 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbevf_clean_rx_irq()
1155 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size); in ixgbevf_clean_rx_irq()
1159 unsigned int offset = ixgbevf_rx_offset(rx_ring); in ixgbevf_clean_rx_irq()
1167 xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size); in ixgbevf_clean_rx_irq()
1169 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp); in ixgbevf_clean_rx_irq()
1175 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1183 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size); in ixgbevf_clean_rx_irq()
1184 } else if (ring_uses_build_skb(rx_ring)) { in ixgbevf_clean_rx_irq()
1185 skb = ixgbevf_build_skb(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1188 skb = ixgbevf_construct_skb(rx_ring, rx_buffer, in ixgbevf_clean_rx_irq()
1194 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbevf_clean_rx_irq()
1199 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb); in ixgbevf_clean_rx_irq()
1203 if (ixgbevf_is_non_eop(rx_ring, rx_desc)) in ixgbevf_clean_rx_irq()
1207 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { in ixgbevf_clean_rx_irq()
1220 ether_addr_equal(rx_ring->netdev->dev_addr, in ixgbevf_clean_rx_irq()
1227 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); in ixgbevf_clean_rx_irq()
1239 rx_ring->skb = skb; in ixgbevf_clean_rx_irq()
1243 adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_clean_rx_irq()
1252 u64_stats_update_begin(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1253 rx_ring->stats.packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1254 rx_ring->stats.bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1255 u64_stats_update_end(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1975 struct ixgbevf_ring *rx_ring) in ixgbevf_set_rx_buffer_len() argument
1981 clear_ring_build_skb_enabled(rx_ring); in ixgbevf_set_rx_buffer_len()
1982 clear_ring_uses_large_buffer(rx_ring); in ixgbevf_set_rx_buffer_len()
1987 set_ring_build_skb_enabled(rx_ring); in ixgbevf_set_rx_buffer_len()
1993 set_ring_uses_large_buffer(rx_ring); in ixgbevf_set_rx_buffer_len()
2025 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_configure_rx() local
2027 ixgbevf_set_rx_buffer_len(adapter, rx_ring); in ixgbevf_configure_rx()
2028 ixgbevf_configure_rx_ring(adapter, rx_ring); in ixgbevf_configure_rx()
2335 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) in ixgbevf_clean_rx_ring() argument
2337 u16 i = rx_ring->next_to_clean; in ixgbevf_clean_rx_ring()
2340 if (rx_ring->skb) { in ixgbevf_clean_rx_ring()
2341 dev_kfree_skb(rx_ring->skb); in ixgbevf_clean_rx_ring()
2342 rx_ring->skb = NULL; in ixgbevf_clean_rx_ring()
2346 while (i != rx_ring->next_to_alloc) { in ixgbevf_clean_rx_ring()
2349 rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbevf_clean_rx_ring()
2354 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_clean_rx_ring()
2357 ixgbevf_rx_bufsz(rx_ring), in ixgbevf_clean_rx_ring()
2361 dma_unmap_page_attrs(rx_ring->dev, in ixgbevf_clean_rx_ring()
2363 ixgbevf_rx_pg_size(rx_ring), in ixgbevf_clean_rx_ring()
2371 if (i == rx_ring->count) in ixgbevf_clean_rx_ring()
2375 rx_ring->next_to_alloc = 0; in ixgbevf_clean_rx_ring()
2376 rx_ring->next_to_clean = 0; in ixgbevf_clean_rx_ring()
2377 rx_ring->next_to_use = 0; in ixgbevf_clean_rx_ring()
2451 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); in ixgbevf_clean_all_rx_rings()
2480 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbevf_down()
2804 adapter->rx_ring[rxr_idx] = ring; in ixgbevf_alloc_q_vector()
2839 adapter->rx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
3133 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_update_stats() local
3135 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbevf_update_stats()
3136 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbevf_update_stats()
3137 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbevf_update_stats()
3138 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbevf_update_stats()
3469 struct ixgbevf_ring *rx_ring) in ixgbevf_setup_rx_resources() argument
3473 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; in ixgbevf_setup_rx_resources()
3474 rx_ring->rx_buffer_info = vmalloc(size); in ixgbevf_setup_rx_resources()
3475 if (!rx_ring->rx_buffer_info) in ixgbevf_setup_rx_resources()
3478 u64_stats_init(&rx_ring->syncp); in ixgbevf_setup_rx_resources()
3481 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbevf_setup_rx_resources()
3482 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbevf_setup_rx_resources()
3484 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, in ixgbevf_setup_rx_resources()
3485 &rx_ring->dma, GFP_KERNEL); in ixgbevf_setup_rx_resources()
3487 if (!rx_ring->desc) in ixgbevf_setup_rx_resources()
3491 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbevf_setup_rx_resources()
3492 rx_ring->queue_index, 0) < 0) in ixgbevf_setup_rx_resources()
3495 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbevf_setup_rx_resources()
3499 vfree(rx_ring->rx_buffer_info); in ixgbevf_setup_rx_resources()
3500 rx_ring->rx_buffer_info = NULL; in ixgbevf_setup_rx_resources()
3501 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); in ixgbevf_setup_rx_resources()
3520 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3531 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3541 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) in ixgbevf_free_rx_resources() argument
3543 ixgbevf_clean_rx_ring(rx_ring); in ixgbevf_free_rx_resources()
3545 rx_ring->xdp_prog = NULL; in ixgbevf_free_rx_resources()
3546 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbevf_free_rx_resources()
3547 vfree(rx_ring->rx_buffer_info); in ixgbevf_free_rx_resources()
3548 rx_ring->rx_buffer_info = NULL; in ixgbevf_free_rx_resources()
3550 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, in ixgbevf_free_rx_resources()
3551 rx_ring->dma); in ixgbevf_free_rx_resources()
3553 rx_ring->desc = NULL; in ixgbevf_free_rx_resources()
3567 if (adapter->rx_ring[i]->desc) in ixgbevf_free_all_rx_resources()
3568 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_free_all_rx_resources()
4359 ring = adapter->rx_ring[i]; in ixgbevf_get_stats()
4423 struct ixgbevf_ring *ring = adapter->rx_ring[i]; in ixgbevf_xdp_setup()
4447 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); in ixgbevf_xdp_setup()
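Taken together, the matches above revolve around one piece of bookkeeping: the ring indices next_to_clean, next_to_use and next_to_alloc that ixgbevf_is_non_eop(), ixgbevf_alloc_rx_buffers() and ixgbevf_clean_rx_irq() advance and compare. The following is a minimal user-space sketch of that index arithmetic, not driver code; the struct and helper names (rx_ring_model, advance_ntc, desc_unused) are hypothetical, and only the field names and the wrap/unused patterns mirror what the listed lines show.

    /*
     * Hypothetical stand-alone model of the RX descriptor-ring index
     * bookkeeping seen in the matches above.  Compile with any C compiler;
     * nothing here is the actual ixgbevf implementation.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct rx_ring_model {
            uint16_t count;          /* number of descriptors in the ring */
            uint16_t next_to_use;    /* next slot the driver will hand to HW */
            uint16_t next_to_clean;  /* next slot the driver will reap */
    };

    /* Advance next_to_clean by one and wrap at ring->count, the same
     * pattern ixgbevf_is_non_eop() uses (lines 585-589) before
     * prefetching the next descriptor. */
    static uint16_t advance_ntc(struct rx_ring_model *ring)
    {
            uint16_t ntc = ring->next_to_clean + 1;

            ntc = (ntc < ring->count) ? ntc : 0;
            ring->next_to_clean = ntc;
            return ntc;
    }

    /* Descriptors not yet handed to hardware, i.e. how many buffers a
     * refill pass such as ixgbevf_alloc_rx_buffers() may post; this is
     * what cleaned_count holds at line 1122 in ixgbevf_clean_rx_irq(). */
    static uint16_t desc_unused(const struct rx_ring_model *ring)
    {
            int16_t unused = ring->next_to_clean - ring->next_to_use - 1;

            return unused < 0 ? unused + ring->count : unused;
    }

    int main(void)
    {
            struct rx_ring_model ring = {
                    .count = 8, .next_to_use = 5, .next_to_clean = 2,
            };

            printf("unused before cleaning: %u\n", desc_unused(&ring)); /* 4 */
            advance_ntc(&ring);                                         /* reap one descriptor */
            printf("unused after cleaning:  %u\n", desc_unused(&ring)); /* 5 */
            return 0;
    }

The same wrap-at-count idiom also explains the refill loop at lines 655-710, which biases the loop index by subtracting rx_ring->count and adds it back before updating next_to_use and writing the tail register.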