Lines Matching refs:rx_ring

655 void iavf_clean_rx_ring(struct iavf_ring *rx_ring) in iavf_clean_rx_ring() argument
661 if (!rx_ring->rx_bi) in iavf_clean_rx_ring()
664 if (rx_ring->skb) { in iavf_clean_rx_ring()
665 dev_kfree_skb(rx_ring->skb); in iavf_clean_rx_ring()
666 rx_ring->skb = NULL; in iavf_clean_rx_ring()
670 for (i = 0; i < rx_ring->count; i++) { in iavf_clean_rx_ring()
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; in iavf_clean_rx_ring()
679 dma_sync_single_range_for_cpu(rx_ring->dev, in iavf_clean_rx_ring()
682 rx_ring->rx_buf_len, in iavf_clean_rx_ring()
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in iavf_clean_rx_ring()
687 iavf_rx_pg_size(rx_ring), in iavf_clean_rx_ring()
697 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; in iavf_clean_rx_ring()
698 memset(rx_ring->rx_bi, 0, bi_size); in iavf_clean_rx_ring()
701 memset(rx_ring->desc, 0, rx_ring->size); in iavf_clean_rx_ring()
703 rx_ring->next_to_alloc = 0; in iavf_clean_rx_ring()
704 rx_ring->next_to_clean = 0; in iavf_clean_rx_ring()
705 rx_ring->next_to_use = 0; in iavf_clean_rx_ring()
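
The matches above are from iavf_clean_rx_ring(): it frees any partially assembled skb, syncs and unmaps every page in the rx_bi array, then zeroes the buffer-info array, the descriptor memory, and the ring indices. Below is a minimal sketch of that teardown pattern; my_rx_ring and my_rx_buffer are hypothetical stand-ins for the driver's structures, and the page-size/DMA-attribute details are simplified (the real code uses iavf_rx_pg_size() and its own DMA attributes).

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical, trimmed-down stand-ins for struct iavf_rx_buffer and
 * struct iavf_ring; field names mirror the ones visible above.
 */
struct my_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct my_rx_ring {
	struct device *dev;
	struct my_rx_buffer *rx_bi;
	struct sk_buff *skb;	/* packet still being assembled, if any */
	void *desc;		/* descriptor memory */
	u32 size;		/* bytes of descriptor memory */
	u16 count;		/* descriptors in the ring */
	u16 rx_buf_len;
	u16 next_to_alloc, next_to_clean, next_to_use;
};

static void my_clean_rx_ring(struct my_rx_ring *rx_ring)
{
	u16 i;

	if (!rx_ring->rx_bi)
		return;

	/* Drop an skb that spans descriptors we have not finished. */
	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	for (i = 0; i < rx_ring->count; i++) {
		struct my_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines the device may have written,
		 * then unmap and release the page (simplified here to
		 * order-0 pages and default DMA attributes).
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, 0);
		__free_pages(rx_bi->page, 0);
		rx_bi->page = NULL;
	}

	memset(rx_ring->rx_bi, 0,
	       sizeof(struct my_rx_buffer) * rx_ring->count);
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
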
714 void iavf_free_rx_resources(struct iavf_ring *rx_ring) in iavf_free_rx_resources() argument
716 iavf_clean_rx_ring(rx_ring); in iavf_free_rx_resources()
717 kfree(rx_ring->rx_bi); in iavf_free_rx_resources()
718 rx_ring->rx_bi = NULL; in iavf_free_rx_resources()
720 if (rx_ring->desc) { in iavf_free_rx_resources()
721 dma_free_coherent(rx_ring->dev, rx_ring->size, in iavf_free_rx_resources()
722 rx_ring->desc, rx_ring->dma); in iavf_free_rx_resources()
723 rx_ring->desc = NULL; in iavf_free_rx_resources()
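
iavf_free_rx_resources() above is the teardown counterpart of the setup path: clean the ring, free the software buffer array, then return the coherent descriptor memory. A sketch of the same pattern with the state passed explicitly (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* 'desc' and 'dma' are the virtual address and bus address pair that
 * dma_alloc_coherent() returned at setup time; 'size' is the length
 * that was allocated.
 */
static void my_free_rx_resources(struct device *dev, void **rx_bi,
				 void **desc, dma_addr_t dma, u32 size)
{
	kfree(*rx_bi);		/* software buffer-info array */
	*rx_bi = NULL;

	if (*desc) {
		dma_free_coherent(dev, size, *desc, dma);
		*desc = NULL;
	}
}
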
733 int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) in iavf_setup_rx_descriptors() argument
735 struct device *dev = rx_ring->dev; in iavf_setup_rx_descriptors()
739 WARN_ON(rx_ring->rx_bi); in iavf_setup_rx_descriptors()
740 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; in iavf_setup_rx_descriptors()
741 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); in iavf_setup_rx_descriptors()
742 if (!rx_ring->rx_bi) in iavf_setup_rx_descriptors()
745 u64_stats_init(&rx_ring->syncp); in iavf_setup_rx_descriptors()
748 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); in iavf_setup_rx_descriptors()
749 rx_ring->size = ALIGN(rx_ring->size, 4096); in iavf_setup_rx_descriptors()
750 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in iavf_setup_rx_descriptors()
751 &rx_ring->dma, GFP_KERNEL); in iavf_setup_rx_descriptors()
753 if (!rx_ring->desc) { in iavf_setup_rx_descriptors()
755 rx_ring->size); in iavf_setup_rx_descriptors()
759 rx_ring->next_to_alloc = 0; in iavf_setup_rx_descriptors()
760 rx_ring->next_to_clean = 0; in iavf_setup_rx_descriptors()
761 rx_ring->next_to_use = 0; in iavf_setup_rx_descriptors()
765 kfree(rx_ring->rx_bi); in iavf_setup_rx_descriptors()
766 rx_ring->rx_bi = NULL; in iavf_setup_rx_descriptors()
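
iavf_setup_rx_descriptors() allocates a zeroed buffer-info array with kzalloc(), then sizes the descriptor ring as count 32-byte descriptors rounded up to a 4 KiB boundary, allocates it with dma_alloc_coherent(), and undoes the kzalloc() on failure. Here is a sketch of just the descriptor allocation; DESC_SIZE is an assumed stand-in for sizeof(union iavf_32byte_rx_desc).

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#define DESC_SIZE 32	/* assumed: one 32-byte Rx descriptor */

static void *my_alloc_desc_ring(struct device *dev, u16 count,
				dma_addr_t *dma, u32 *size)
{
	void *desc;

	/* Round the ring up to a 4 KiB boundary, as the code above does. */
	*size = ALIGN((u32)count * DESC_SIZE, 4096);

	desc = dma_alloc_coherent(dev, *size, dma, GFP_KERNEL);
	if (!desc)
		dev_err(dev, "descriptor ring allocation failed, size=%u\n",
			*size);

	return desc;
}
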
775 static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) in iavf_release_rx_desc() argument
777 rx_ring->next_to_use = val; in iavf_release_rx_desc()
780 rx_ring->next_to_alloc = val; in iavf_release_rx_desc()
788 writel(val, rx_ring->tail); in iavf_release_rx_desc()
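
iavf_release_rx_desc() is the tail-bump helper: it records the new next_to_use/next_to_alloc positions and writes the tail doorbell, with a write barrier so the descriptor updates are visible to the device before the doorbell. A sketch of that ordering requirement (parameter names are illustrative):

#include <linux/io.h>
#include <asm/barrier.h>

static void my_release_rx_desc(void __iomem *tail, u32 val,
			       u16 *next_to_use, u16 *next_to_alloc)
{
	*next_to_use = val;
	*next_to_alloc = val;	/* ring has been refilled up to 'val' */

	/* Make descriptor memory writes globally visible before the
	 * MMIO tail write that hands the new descriptors to hardware.
	 */
	wmb();
	writel(val, tail);
}
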
797 static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) in iavf_rx_offset() argument
799 return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; in iavf_rx_offset()
810 static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, in iavf_alloc_mapped_page() argument
818 rx_ring->rx_stats.page_reuse_count++; in iavf_alloc_mapped_page()
823 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); in iavf_alloc_mapped_page()
825 rx_ring->rx_stats.alloc_page_failed++; in iavf_alloc_mapped_page()
830 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in iavf_alloc_mapped_page()
831 iavf_rx_pg_size(rx_ring), in iavf_alloc_mapped_page()
838 if (dma_mapping_error(rx_ring->dev, dma)) { in iavf_alloc_mapped_page()
839 __free_pages(page, iavf_rx_pg_order(rx_ring)); in iavf_alloc_mapped_page()
840 rx_ring->rx_stats.alloc_page_failed++; in iavf_alloc_mapped_page()
846 bi->page_offset = iavf_rx_offset(rx_ring); in iavf_alloc_mapped_page()
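
iavf_alloc_mapped_page() allocates a receive page with dev_alloc_pages(), DMA-maps it for the device, and frees the page again if the mapping fails, counting alloc_page_failed on either failure (page_reuse_count when a page is already present). A simplified sketch of that allocate-map-or-back-out pattern, with the offset and DMA attributes reduced to defaults:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>	/* dev_alloc_pages() */

static bool my_alloc_mapped_page(struct device *dev, unsigned int order,
				 struct page **pagep, dma_addr_t *dmap)
{
	struct page *page;
	dma_addr_t dma;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		return false;	/* caller counts alloc_page_failed */

	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE << order,
				 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(dev, dma)) {
		__free_pages(page, order);
		return false;
	}

	*pagep = page;
	*dmap = dma;
	return true;
}
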
860 static void iavf_receive_skb(struct iavf_ring *rx_ring, in iavf_receive_skb() argument
863 struct iavf_q_vector *q_vector = rx_ring->q_vector; in iavf_receive_skb()
865 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && in iavf_receive_skb()
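
iavf_receive_skb() hands a completed packet to the stack: if VLAN CTAG receive offload is enabled on the netdev and the descriptor carried a tag, the tag is attached to the skb before napi_gro_receive() on this queue's NAPI context. A sketch of that hand-off:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void my_receive_skb(struct napi_struct *napi, struct sk_buff *skb,
			   u16 vlan_tag, netdev_features_t features)
{
	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(napi, skb);
}
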
879 bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) in iavf_alloc_rx_buffers() argument
881 u16 ntu = rx_ring->next_to_use; in iavf_alloc_rx_buffers()
886 if (!rx_ring->netdev || !cleaned_count) in iavf_alloc_rx_buffers()
889 rx_desc = IAVF_RX_DESC(rx_ring, ntu); in iavf_alloc_rx_buffers()
890 bi = &rx_ring->rx_bi[ntu]; in iavf_alloc_rx_buffers()
893 if (!iavf_alloc_mapped_page(rx_ring, bi)) in iavf_alloc_rx_buffers()
897 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in iavf_alloc_rx_buffers()
899 rx_ring->rx_buf_len, in iavf_alloc_rx_buffers()
910 if (unlikely(ntu == rx_ring->count)) { in iavf_alloc_rx_buffers()
911 rx_desc = IAVF_RX_DESC(rx_ring, 0); in iavf_alloc_rx_buffers()
912 bi = rx_ring->rx_bi; in iavf_alloc_rx_buffers()
922 if (rx_ring->next_to_use != ntu) in iavf_alloc_rx_buffers()
923 iavf_release_rx_desc(rx_ring, ntu); in iavf_alloc_rx_buffers()
928 if (rx_ring->next_to_use != ntu) in iavf_alloc_rx_buffers()
929 iavf_release_rx_desc(rx_ring, ntu); in iavf_alloc_rx_buffers()
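
iavf_alloc_rx_buffers() is the refill loop: for each slot it ensures a mapped page exists, syncs the buffer region back to the device, writes the buffer's DMA address into the descriptor (and clears its writeback status, not shown in the matches above), and wraps ntu at rx_ring->count; the tail is bumped via iavf_release_rx_desc() only when next_to_use actually moved. A sketch of the index/descriptor bookkeeping only, over a hypothetical 16-byte descriptor (the real union iavf_32byte_rx_desc layout differs):

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical read-format descriptor: hardware consumes the buffer
 * address; the status/length word is written back on completion and
 * must be zeroed here so stale DD bits are not seen.
 */
struct my_rx_desc {
	__le64 pkt_addr;
	__le64 status_len;
};

static u16 my_refill_ring(struct my_rx_desc *ring, const dma_addr_t *bufs,
			  u16 ntu, u16 count, u16 to_fill)
{
	while (to_fill--) {
		ring[ntu].pkt_addr = cpu_to_le64(bufs[ntu]);
		ring[ntu].status_len = 0;

		ntu++;
		if (unlikely(ntu == count))
			ntu = 0;	/* wrap to the start of the ring */
	}

	return ntu;	/* caller bumps the tail if this changed */
}
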
1082 void iavf_process_skb_fields(struct iavf_ring *rx_ring, in iavf_process_skb_fields() argument
1086 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); in iavf_process_skb_fields()
1088 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc); in iavf_process_skb_fields()
1090 skb_record_rx_queue(skb, rx_ring->queue_index); in iavf_process_skb_fields()
1093 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in iavf_process_skb_fields()
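
iavf_process_skb_fields() populates per-packet metadata before delivery: the RSS hash, the checksum offload result, the receive queue index, and the L2 protocol. A sketch of the two generic calls visible above (the hash/checksum helpers are driver-specific and omitted):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void my_process_skb_fields(struct sk_buff *skb,
				  struct net_device *netdev, u16 queue)
{
	/* Record which Rx queue delivered the packet (used by RPS/XPS). */
	skb_record_rx_queue(skb, queue);

	/* Resolve the Ethernet protocol; this also sets skb->dev. */
	skb->protocol = eth_type_trans(skb, netdev);
}
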
1109 static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) in iavf_cleanup_headers() argument
1125 static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, in iavf_reuse_rx_page() argument
1129 u16 nta = rx_ring->next_to_alloc; in iavf_reuse_rx_page()
1131 new_buff = &rx_ring->rx_bi[nta]; in iavf_reuse_rx_page()
1135 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in iavf_reuse_rx_page()
1215 static void iavf_add_rx_frag(struct iavf_ring *rx_ring, in iavf_add_rx_frag() argument
1221 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; in iavf_add_rx_frag()
1223 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); in iavf_add_rx_frag()
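
iavf_add_rx_frag() appends the received page region to an existing skb as a page fragment rather than copying it; truesize is half a page when the ring splits pages in two, otherwise the aligned size plus headroom, as the two expressions above show. A sketch of the attach itself:

#include <linux/skbuff.h>

static void my_add_rx_frag(struct sk_buff *skb, struct page *page,
			   unsigned int offset, unsigned int size,
			   unsigned int truesize)
{
	/* Hang the DMA'd page region off the skb zero-copy; 'truesize'
	 * is what gets charged to socket memory accounting.
	 */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			offset, size, truesize);
}
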
1248 static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, in iavf_get_rx_buffer() argument
1256 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; in iavf_get_rx_buffer()
1260 dma_sync_single_range_for_cpu(rx_ring->dev, in iavf_get_rx_buffer()
1282 static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, in iavf_construct_skb() argument
1288 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; in iavf_construct_skb()
1302 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, in iavf_construct_skb()
1346 static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, in iavf_build_skb() argument
1352 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; in iavf_build_skb()
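
iavf_build_skb() wraps an skb directly around the already-mapped buffer memory instead of allocating a fresh data area, which is why the ring reserves IAVF_SKB_PAD of headroom in front of the packet (see iavf_rx_offset() above). A sketch of that pattern; 'va' points at the packet data and 'headroom' plays the role of IAVF_SKB_PAD:

#include <linux/skbuff.h>

static struct sk_buff *my_build_skb(void *va, unsigned int truesize,
				    unsigned int headroom, unsigned int size)
{
	struct sk_buff *skb;

	/* Build the skb around the buffer that already holds the frame. */
	skb = build_skb(va - headroom, truesize);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);	/* skb->data now points at the frame */
	skb_put(skb, size);		/* account the received length */

	return skb;
}
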
1392 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, in iavf_put_rx_buffer() argument
1400 iavf_reuse_rx_page(rx_ring, rx_buffer); in iavf_put_rx_buffer()
1401 rx_ring->rx_stats.page_reuse_count++; in iavf_put_rx_buffer()
1404 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in iavf_put_rx_buffer()
1405 iavf_rx_pg_size(rx_ring), in iavf_put_rx_buffer()
1426 static bool iavf_is_non_eop(struct iavf_ring *rx_ring, in iavf_is_non_eop() argument
1430 u32 ntc = rx_ring->next_to_clean + 1; in iavf_is_non_eop()
1433 ntc = (ntc < rx_ring->count) ? ntc : 0; in iavf_is_non_eop()
1434 rx_ring->next_to_clean = ntc; in iavf_is_non_eop()
1436 prefetch(IAVF_RX_DESC(rx_ring, ntc)); in iavf_is_non_eop()
1443 rx_ring->rx_stats.non_eop_descs++; in iavf_is_non_eop()
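
iavf_is_non_eop() advances next_to_clean (wrapping at rx_ring->count), prefetches the next descriptor, and reports whether the current descriptor lacks the end-of-packet bit, in which case non_eop_descs is counted and the caller keeps gathering fragments. A sketch of the wrap-and-test logic; EOP_BIT is an assumed stand-in for the descriptor's EOP status bit:

#include <linux/bits.h>
#include <linux/types.h>

#define EOP_BIT BIT(1)	/* assumed position of the end-of-packet flag */

static bool my_is_non_eop(u64 desc_status, u16 *next_to_clean, u16 count)
{
	u16 ntc = *next_to_clean + 1;

	/* Advance and wrap the cleaning index. */
	ntc = (ntc < count) ? ntc : 0;
	*next_to_clean = ntc;

	/* true: this descriptor does not end the packet, keep chaining. */
	return !(desc_status & EOP_BIT);
}
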
1460 static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) in iavf_clean_rx_irq() argument
1463 struct sk_buff *skb = rx_ring->skb; in iavf_clean_rx_irq()
1464 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); in iavf_clean_rx_irq()
1478 iavf_alloc_rx_buffers(rx_ring, cleaned_count); in iavf_clean_rx_irq()
1482 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in iavf_clean_rx_irq()
1503 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); in iavf_clean_rx_irq()
1504 rx_buffer = iavf_get_rx_buffer(rx_ring, size); in iavf_clean_rx_irq()
1508 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); in iavf_clean_rx_irq()
1509 else if (ring_uses_build_skb(rx_ring)) in iavf_clean_rx_irq()
1510 skb = iavf_build_skb(rx_ring, rx_buffer, size); in iavf_clean_rx_irq()
1512 skb = iavf_construct_skb(rx_ring, rx_buffer, size); in iavf_clean_rx_irq()
1516 rx_ring->rx_stats.alloc_buff_failed++; in iavf_clean_rx_irq()
1522 iavf_put_rx_buffer(rx_ring, rx_buffer); in iavf_clean_rx_irq()
1525 if (iavf_is_non_eop(rx_ring, rx_desc, skb)) in iavf_clean_rx_irq()
1539 if (iavf_cleanup_headers(rx_ring, skb)) { in iavf_clean_rx_irq()
1552 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); in iavf_clean_rx_irq()
1558 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); in iavf_clean_rx_irq()
1559 iavf_receive_skb(rx_ring, skb, vlan_tag); in iavf_clean_rx_irq()
1566 rx_ring->skb = skb; in iavf_clean_rx_irq()
1568 u64_stats_update_begin(&rx_ring->syncp); in iavf_clean_rx_irq()
1569 rx_ring->stats.packets += total_rx_packets; in iavf_clean_rx_irq()
1570 rx_ring->stats.bytes += total_rx_bytes; in iavf_clean_rx_irq()
1571 u64_stats_update_end(&rx_ring->syncp); in iavf_clean_rx_irq()
1572 rx_ring->q_vector->rx.total_packets += total_rx_packets; in iavf_clean_rx_irq()
1573 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in iavf_clean_rx_irq()
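
iavf_clean_rx_irq() is the NAPI poll body tying the pieces above together: refill once enough descriptors have been consumed, fetch the buffer, grow or build an skb, handle multi-descriptor packets via iavf_is_non_eop(), populate fields, and hand the skb to iavf_receive_skb(); at the end the per-ring byte/packet totals are folded into the 64-bit counters under the u64_stats seqcount. A sketch of that final stats update:

#include <linux/u64_stats_sync.h>

struct my_ring_stats {
	struct u64_stats_sync syncp;	/* lets 32-bit readers see consistent 64-bit values */
	u64 packets;
	u64 bytes;
};

static void my_update_rx_stats(struct my_ring_stats *stats,
			       unsigned int total_rx_packets,
			       unsigned int total_rx_bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets += total_rx_packets;
	stats->bytes += total_rx_bytes;
	u64_stats_update_end(&stats->syncp);
}
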