Lines matching refs:rx_ring (fm10k Ethernet driver: Rx ring refill, Rx cleanup path, and ring/queue setup)
67 static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, in fm10k_alloc_mapped_page() argument
80 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
85 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in fm10k_alloc_mapped_page()
90 if (dma_mapping_error(rx_ring->dev, dma)) { in fm10k_alloc_mapped_page()
93 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
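The matches above (source lines 67-93) come from the allocate-then-map helper: grab an Rx page, DMA-map it for receive, and bump rx_stats.alloc_failed if either step fails. A hedged sketch of that pattern follows; dev_alloc_page() as the allocation step and the helper/parameter names are assumptions here, only dma_map_page()/dma_mapping_error() are taken directly from the lines above.

/*
 * Sketch of the alloc-then-map pattern behind fm10k_alloc_mapped_page():
 * allocate an Rx page, map it for DMA, and undo the allocation if the
 * mapping fails so the page is not leaked.  Helper name and the
 * alloc_failed pointer are illustrative, not the driver's interface.
 */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static bool rx_alloc_mapped_page(struct device *dev, struct page **pagep,
				 dma_addr_t *dma_out, u64 *alloc_failed)
{
	struct page *page;
	dma_addr_t dma;

	page = dev_alloc_page();		/* assumed allocation step */
	if (unlikely(!page)) {
		(*alloc_failed)++;
		return false;
	}

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* mapping failed: free the page rather than hold unusable memory */
	if (dma_mapping_error(dev, dma)) {
		__free_page(page);
		(*alloc_failed)++;
		return false;
	}

	*pagep = page;
	*dma_out = dma;
	return true;
}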
109 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) in fm10k_alloc_rx_buffers() argument
113 u16 i = rx_ring->next_to_use; in fm10k_alloc_rx_buffers()
119 rx_desc = FM10K_RX_DESC(rx_ring, i); in fm10k_alloc_rx_buffers()
120 bi = &rx_ring->rx_buffer[i]; in fm10k_alloc_rx_buffers()
121 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
124 if (!fm10k_alloc_mapped_page(rx_ring, bi)) in fm10k_alloc_rx_buffers()
136 rx_desc = FM10K_RX_DESC(rx_ring, 0); in fm10k_alloc_rx_buffers()
137 bi = rx_ring->rx_buffer; in fm10k_alloc_rx_buffers()
138 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
147 i += rx_ring->count; in fm10k_alloc_rx_buffers()
149 if (rx_ring->next_to_use != i) { in fm10k_alloc_rx_buffers()
151 rx_ring->next_to_use = i; in fm10k_alloc_rx_buffers()
154 rx_ring->next_to_alloc = i; in fm10k_alloc_rx_buffers()
164 writel(i, rx_ring->tail); in fm10k_alloc_rx_buffers()
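The refill loop at source lines 113-164 biases the ring index negative (i -= rx_ring->count) so the wrap test is just "if (!i)", and only restores the real position with i += rx_ring->count before storing next_to_use and writing the tail register. A minimal userspace sketch of that indexing trick alone (ring size and function name are made up for illustration; the driver uses u16 wraparound where this sketch uses a signed short for clarity):

/*
 * Demonstrates the biased-index refill pattern used by
 * fm10k_alloc_rx_buffers(): start at next_to_use - count, treat
 * "i reached 0" as the wrap point, and add count back at the end
 * to recover the new next_to_use.
 */
#include <stdio.h>

#define RING_COUNT 8

static void refill_ring(short next_to_use, unsigned short cleaned_count)
{
	short i = next_to_use;

	if (!cleaned_count)
		return;

	i -= RING_COUNT;			/* bias: i is now negative */

	do {
		printf("fill slot %d\n", i + RING_COUNT);

		i++;
		if (!i) {			/* wrapped past the last slot */
			printf("-- wrap to slot 0 --\n");
			i -= RING_COUNT;
		}
	} while (--cleaned_count);

	i += RING_COUNT;			/* undo the bias */
	printf("new next_to_use = %d\n", i);	/* would be written to tail */
}

int main(void)
{
	refill_ring(5, 6);	/* fills 5, 6, 7, wraps, then 0, 1, 2 -> next is 3 */
	return 0;
}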
175 static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, in fm10k_reuse_rx_page() argument
179 u16 nta = rx_ring->next_to_alloc; in fm10k_reuse_rx_page()
181 new_buff = &rx_ring->rx_buffer[nta]; in fm10k_reuse_rx_page()
185 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in fm10k_reuse_rx_page()
191 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, in fm10k_reuse_rx_page()
296 static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, in fm10k_fetch_rx_buffer() argument
304 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; in fm10k_fetch_rx_buffer()
316 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in fm10k_fetch_rx_buffer()
319 rx_ring->rx_stats.alloc_failed++; in fm10k_fetch_rx_buffer()
331 dma_sync_single_range_for_cpu(rx_ring->dev, in fm10k_fetch_rx_buffer()
340 fm10k_reuse_rx_page(rx_ring, rx_buffer); in fm10k_fetch_rx_buffer()
343 dma_unmap_page(rx_ring->dev, rx_buffer->dma, in fm10k_fetch_rx_buffer()
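fm10k_fetch_rx_buffer() (source lines 296-343) pulls the buffer at next_to_clean, allocates an skb if one is not already being assembled, syncs the received bytes for the CPU, and then either recycles the page through fm10k_reuse_rx_page() or unmaps it. A condensed, hedged sketch of that flow; can_reuse_rx_page() stands in for the driver's real add-frag/page-reuse decision, and FM10K_RX_HDR_LEN is assumed as the skb allocation size:

/*
 * Condensed sketch of the fetch path: sync for CPU, attach the page to
 * the skb, then recycle or unmap.  can_reuse_rx_page() is a hypothetical
 * stand-in for the driver's add-frag/page-reuse logic.
 */
static struct sk_buff *fetch_rx_buffer_sketch(struct fm10k_ring *rx_ring,
					      union fm10k_rx_desc *rx_desc,
					      struct sk_buff *skb,
					      unsigned int size)
{
	struct fm10k_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];

	if (!skb) {
		/* start a new skb for this frame */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}
	}

	/* make the DMA'd bytes visible to the CPU before they are used */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
				      rx_buffer->page_offset, size,
				      DMA_FROM_DEVICE);

	if (can_reuse_rx_page(rx_buffer, rx_desc, skb, size)) {
		/* driver still owns part of the page: hand it back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* page now belongs to the stack: drop the DMA mapping */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	rx_buffer->page = NULL;		/* slot will be refilled on the next pass */

	return skb;
}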
408 static void fm10k_type_trans(struct fm10k_ring *rx_ring, in fm10k_type_trans() argument
412 struct net_device *dev = rx_ring->netdev; in fm10k_type_trans()
413 struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); in fm10k_type_trans()
428 skb_record_rx_queue(skb, rx_ring->queue_index); in fm10k_type_trans()
446 static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, in fm10k_process_skb_fields() argument
452 fm10k_rx_hash(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
454 fm10k_rx_checksum(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
465 if ((vid & VLAN_VID_MASK) != rx_ring->vid) in fm10k_process_skb_fields()
472 fm10k_type_trans(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
487 static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, in fm10k_is_non_eop() argument
490 u32 ntc = rx_ring->next_to_clean + 1; in fm10k_is_non_eop()
493 ntc = (ntc < rx_ring->count) ? ntc : 0; in fm10k_is_non_eop()
494 rx_ring->next_to_clean = ntc; in fm10k_is_non_eop()
496 prefetch(FM10K_RX_DESC(rx_ring, ntc)); in fm10k_is_non_eop()
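fm10k_is_non_eop() (source lines 487-496) advances next_to_clean with the usual compare-against-count wrap, prefetches the next descriptor, and reports whether more descriptors belong to the current frame so fm10k_clean_rx_irq() keeps accumulating into the same skb. A short sketch; the fm10k_test_staterr()/FM10K_RXD_STATUS_EOP end-of-packet check is an assumption, since only the rx_ring accesses appear in the matches above:

/*
 * Sketch of the end-of-packet bookkeeping: bump next_to_clean (with
 * wrap), prefetch the next descriptor, and return true while the frame
 * continues across descriptors.  The EOP status-bit test is assumed.
 */
static bool is_non_eop_sketch(struct fm10k_ring *rx_ring,
			      union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* wrap back to slot 0 instead of using a modulo */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	/* assumed: EOP bit set means this descriptor ends the frame */
	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}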
518 static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, in fm10k_cleanup_headers() argument
527 rx_ring->rx_stats.switch_errors++; in fm10k_cleanup_headers()
529 rx_ring->rx_stats.drops++; in fm10k_cleanup_headers()
531 rx_ring->rx_stats.pp_errors++; in fm10k_cleanup_headers()
533 rx_ring->rx_stats.link_errors++; in fm10k_cleanup_headers()
535 rx_ring->rx_stats.length_errors++; in fm10k_cleanup_headers()
537 rx_ring->rx_stats.errors++; in fm10k_cleanup_headers()
560 struct fm10k_ring *rx_ring, in fm10k_clean_rx_irq() argument
563 struct sk_buff *skb = rx_ring->skb; in fm10k_clean_rx_irq()
565 u16 cleaned_count = fm10k_desc_unused(rx_ring); in fm10k_clean_rx_irq()
572 fm10k_alloc_rx_buffers(rx_ring, cleaned_count); in fm10k_clean_rx_irq()
576 rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); in fm10k_clean_rx_irq()
588 skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); in fm10k_clean_rx_irq()
597 if (fm10k_is_non_eop(rx_ring, rx_desc)) in fm10k_clean_rx_irq()
601 if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { in fm10k_clean_rx_irq()
607 total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); in fm10k_clean_rx_irq()
619 rx_ring->skb = skb; in fm10k_clean_rx_irq()
621 u64_stats_update_begin(&rx_ring->syncp); in fm10k_clean_rx_irq()
622 rx_ring->stats.packets += total_packets; in fm10k_clean_rx_irq()
623 rx_ring->stats.bytes += total_bytes; in fm10k_clean_rx_irq()
624 u64_stats_update_end(&rx_ring->syncp); in fm10k_clean_rx_irq()
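The per-ring counters bumped at source lines 621-624 are written inside a u64_stats_update_begin()/u64_stats_update_end() pair so the 64-bit packet/byte totals stay consistent on 32-bit systems. Readers (for example an ndo_get_stats64 handler) typically retry with the matching fetch API; a hedged sketch of that reader side follows, with the exact fetch variant depending on kernel version:

/*
 * Sketch of the reader side of the syncp-protected counters updated at
 * lines 621-624.  Some kernel versions use the _irq/_bh fetch variants
 * instead; the ring/stat field names follow the lines above.
 */
#include <linux/u64_stats_sync.h>

static void read_ring_stats(struct fm10k_ring *rx_ring,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rx_ring->syncp);
		*packets = rx_ring->stats.packets;
		*bytes = rx_ring->stats.bytes;
	} while (u64_stats_fetch_retry(&rx_ring->syncp, start));
}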
1664 interface->rx_ring[rxr_idx] = ring; in fm10k_alloc_q_vector()
1699 interface->rx_ring[ring->queue_index] = NULL; in fm10k_free_q_vector()
1877 interface->rx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1878 interface->rx_ring[offset + i]->qos_pc = pc; in fm10k_cache_ring_qos()
1897 interface->rx_ring[i]->reg_idx = i; in fm10k_cache_ring_rss()