Lines matching refs: rx_ring
67 static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, in fm10k_alloc_mapped_page() argument
80 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
85 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in fm10k_alloc_mapped_page()
90 if (dma_mapping_error(rx_ring->dev, dma)) { in fm10k_alloc_mapped_page()
93 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
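
The matches above cover the buffer allocation path: alloc_failed is counted both when the page allocation fails and when the DMA mapping fails, and a page that cannot be mapped is freed rather than kept. A condensed sketch of that pattern, reconstructed around the matched lines rather than quoted verbatim from the driver:

    page = dev_alloc_page();
    if (unlikely(!page)) {
            rx_ring->rx_stats.alloc_failed++;       /* allocation failed */
            return false;
    }

    /* map the whole page; receive buffers are carved out of it later */
    dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    if (dma_mapping_error(rx_ring->dev, dma)) {
            __free_page(page);                      /* don't hold an unmapped page */
            rx_ring->rx_stats.alloc_failed++;       /* mapping failed */
            return false;
    }

    /* record the mapping in the ring's buffer info */
    bi->dma = dma;
    bi->page = page;
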
109 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) in fm10k_alloc_rx_buffers() argument
113 u16 i = rx_ring->next_to_use; in fm10k_alloc_rx_buffers()
119 rx_desc = FM10K_RX_DESC(rx_ring, i); in fm10k_alloc_rx_buffers()
120 bi = &rx_ring->rx_buffer[i]; in fm10k_alloc_rx_buffers()
121 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
124 if (!fm10k_alloc_mapped_page(rx_ring, bi)) in fm10k_alloc_rx_buffers()
136 rx_desc = FM10K_RX_DESC(rx_ring, 0); in fm10k_alloc_rx_buffers()
137 bi = rx_ring->rx_buffer; in fm10k_alloc_rx_buffers()
138 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
147 i += rx_ring->count; in fm10k_alloc_rx_buffers()
149 if (rx_ring->next_to_use != i) { in fm10k_alloc_rx_buffers()
151 rx_ring->next_to_use = i; in fm10k_alloc_rx_buffers()
154 rx_ring->next_to_alloc = i; in fm10k_alloc_rx_buffers()
164 writel(i, rx_ring->tail); in fm10k_alloc_rx_buffers()
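
Lines 121, 138 and 147 use an offset-from-the-end index: i is kept as "position minus ring count" so that i reaching zero marks the wrap point, and adding the count back after the loop recovers the new next_to_use. A standalone plain-C illustration of that trick (hypothetical ring size, not driver code):

    #include <stdio.h>

    #define RING_COUNT 8                    /* hypothetical ring size */

    int main(void)
    {
            unsigned short next_to_use = 5; /* refill starts here */
            unsigned short i = next_to_use;
            unsigned int entry = next_to_use;
            unsigned int to_fill = 6;

            i -= RING_COUNT;                /* bias below zero (wraps as u16) */

            do {
                    printf("fill entry %u\n", entry);
                    entry++;
                    i++;
                    if (!i) {               /* walked past the last entry */
                            entry = 0;      /* wrap back to the start */
                            i -= RING_COUNT;
                    }
            } while (--to_fill);

            i += RING_COUNT;                /* undo the bias */
            printf("new next_to_use = %u\n", (unsigned)i);
            return 0;
    }

With these values the loop fills entries 5, 6, 7, 0, 1, 2 and leaves next_to_use at 3, the same wrap handling the matched lines perform.
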
175 static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, in fm10k_reuse_rx_page() argument
179 u16 nta = rx_ring->next_to_alloc; in fm10k_reuse_rx_page()
181 new_buff = &rx_ring->rx_buffer[nta]; in fm10k_reuse_rx_page()
185 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in fm10k_reuse_rx_page()
191 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, in fm10k_reuse_rx_page()
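
These matches recycle a page that is still usable: the old buffer's mapping is copied into the next_to_alloc slot and the region is handed back to the device with a sync instead of a fresh dma_map_page(). A hedged sketch of that sequence (the nta increment and the buffer-size constant are assumptions; the rest follows the matched lines):

    new_buff = &rx_ring->rx_buffer[nta];

    /* advance next_to_alloc, wrapping at the end of the ring */
    nta++;
    rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

    /* move the old mapping into the slot that will be refilled next */
    *new_buff = *old_buff;

    /* give the buffer back to the device without creating a new mapping */
    dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
                                     old_buff->page_offset,
                                     FM10K_RX_BUFSZ, DMA_FROM_DEVICE);
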
291 static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, in fm10k_fetch_rx_buffer() argument
299 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; in fm10k_fetch_rx_buffer()
311 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in fm10k_fetch_rx_buffer()
314 rx_ring->rx_stats.alloc_failed++; in fm10k_fetch_rx_buffer()
326 dma_sync_single_range_for_cpu(rx_ring->dev, in fm10k_fetch_rx_buffer()
335 fm10k_reuse_rx_page(rx_ring, rx_buffer); in fm10k_fetch_rx_buffer()
338 dma_unmap_page(rx_ring->dev, rx_buffer->dma, in fm10k_fetch_rx_buffer()
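
Lines 326 onward show the two exits for a receive buffer: its used region is synced to the CPU before the data is read, and afterwards the page is either recycled through fm10k_reuse_rx_page() or fully unmapped. A minimal sketch, where size and page_reusable are stand-ins for values the driver derives from the descriptor and the page state:

    /* make the region the NIC wrote visible to the CPU */
    dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
                                  rx_buffer->page_offset, size,
                                  DMA_FROM_DEVICE);

    /* ... the data is attached to the skb here ... */

    if (page_reusable)
            fm10k_reuse_rx_page(rx_ring, rx_buffer);        /* keep the mapping */
    else
            dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                           PAGE_SIZE, DMA_FROM_DEVICE);     /* release it */
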
403 static void fm10k_type_trans(struct fm10k_ring *rx_ring, in fm10k_type_trans() argument
407 struct net_device *dev = rx_ring->netdev; in fm10k_type_trans()
408 struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); in fm10k_type_trans()
423 skb_record_rx_queue(skb, rx_ring->queue_index); in fm10k_type_trans()
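
These matches pick the receiving net_device (the ring's own netdev, or a macvlan device found through the RCU-protected l2_accel table) and record the queue index on the skb. The tail of that path is the usual pair below, assuming the standard eth_type_trans() flow:

    skb_record_rx_queue(skb, rx_ring->queue_index);
    skb->protocol = eth_type_trans(skb, dev);
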
441 static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, in fm10k_process_skb_fields() argument
447 fm10k_rx_hash(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
449 fm10k_rx_checksum(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
460 if ((vid & VLAN_VID_MASK) != rx_ring->vid) in fm10k_process_skb_fields()
467 fm10k_type_trans(rx_ring, rx_desc, skb); in fm10k_process_skb_fields()
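
Line 460 is the VLAN decision: a tag is pushed onto the skb only when the descriptor's VID differs from the ring's default VID. A hedged sketch, with vid standing in for the value read from the Rx descriptor:

    if ((vid & VLAN_VID_MASK) != rx_ring->vid)
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
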
482 static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, in fm10k_is_non_eop() argument
485 u32 ntc = rx_ring->next_to_clean + 1; in fm10k_is_non_eop()
488 ntc = (ntc < rx_ring->count) ? ntc : 0; in fm10k_is_non_eop()
489 rx_ring->next_to_clean = ntc; in fm10k_is_non_eop()
491 prefetch(FM10K_RX_DESC(rx_ring, ntc)); in fm10k_is_non_eop()
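
Lines 485-491 advance next_to_clean with a compare-and-reset wrap and prefetch the descriptor that will be inspected next; the return value says whether the current descriptor lacks the end-of-packet bit. A condensed sketch (the staterr test is named after the driver's helper and may differ in detail):

    u32 ntc = rx_ring->next_to_clean + 1;

    /* advance next_to_clean, wrapping at the end of the ring */
    ntc = (ntc < rx_ring->count) ? ntc : 0;
    rx_ring->next_to_clean = ntc;

    /* warm the cache line of the descriptor we will read next */
    prefetch(FM10K_RX_DESC(rx_ring, ntc));

    /* "non-EOP" means more descriptors belong to this same frame */
    return !fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP);
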
513 static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, in fm10k_cleanup_headers() argument
522 rx_ring->rx_stats.switch_errors++; in fm10k_cleanup_headers()
524 rx_ring->rx_stats.drops++; in fm10k_cleanup_headers()
526 rx_ring->rx_stats.pp_errors++; in fm10k_cleanup_headers()
528 rx_ring->rx_stats.link_errors++; in fm10k_cleanup_headers()
530 rx_ring->rx_stats.length_errors++; in fm10k_cleanup_headers()
532 rx_ring->rx_stats.errors++; in fm10k_cleanup_headers()
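
Lines 522-532 are per-cause error counters; they run only when the descriptor reports a receive error, in which case the frame is dropped and the caller skips it. A hedged outline of that gate, with rx_error standing in for the descriptor's error status test:

    if (unlikely(rx_error)) {
            /* classify into switch_errors/drops/pp_errors/... then: */
            rx_ring->rx_stats.errors++;

            dev_kfree_skb_any(skb);         /* drop the frame */
            return true;                    /* tell the caller to skip it */
    }
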
555 struct fm10k_ring *rx_ring, in fm10k_clean_rx_irq() argument
558 struct sk_buff *skb = rx_ring->skb; in fm10k_clean_rx_irq()
560 u16 cleaned_count = fm10k_desc_unused(rx_ring); in fm10k_clean_rx_irq()
567 fm10k_alloc_rx_buffers(rx_ring, cleaned_count); in fm10k_clean_rx_irq()
571 rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); in fm10k_clean_rx_irq()
583 skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); in fm10k_clean_rx_irq()
592 if (fm10k_is_non_eop(rx_ring, rx_desc)) in fm10k_clean_rx_irq()
596 if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { in fm10k_clean_rx_irq()
602 total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); in fm10k_clean_rx_irq()
614 rx_ring->skb = skb; in fm10k_clean_rx_irq()
616 u64_stats_update_begin(&rx_ring->syncp); in fm10k_clean_rx_irq()
617 rx_ring->stats.packets += total_packets; in fm10k_clean_rx_irq()
618 rx_ring->stats.bytes += total_bytes; in fm10k_clean_rx_irq()
619 u64_stats_update_end(&rx_ring->syncp); in fm10k_clean_rx_irq()
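
The matches from fm10k_clean_rx_irq() outline the NAPI poll loop: refill buffers in batches rather than one at a time, stop at the budget, park a partial frame in rx_ring->skb for the next poll, and publish totals under the ring's u64_stats seqcount so 64-bit counters read consistently on 32-bit hosts. A skeleton of that shape (the batch threshold is the driver's constant from fm10k.h; the loop body is elided):

    do {
            /* return buffers to hardware in batches, not per packet */
            if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
                    fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
                    cleaned_count = 0;
            }

            /* fetch descriptor, build/extend the skb, handle non-EOP ... */
    } while (likely(total_packets < budget));

    rx_ring->skb = skb;                     /* carry a partial frame over */

    u64_stats_update_begin(&rx_ring->syncp);
    rx_ring->stats.packets += total_packets;
    rx_ring->stats.bytes += total_bytes;
    u64_stats_update_end(&rx_ring->syncp);
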
1659 interface->rx_ring[rxr_idx] = ring; in fm10k_alloc_q_vector()
1694 interface->rx_ring[ring->queue_index] = NULL; in fm10k_free_q_vector()
1872 interface->rx_ring[offset + i]->reg_idx = q_idx; in fm10k_cache_ring_qos()
1873 interface->rx_ring[offset + i]->qos_pc = pc; in fm10k_cache_ring_qos()
1892 interface->rx_ring[i]->reg_idx = i; in fm10k_cache_ring_rss()
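
The last matches map software queues to hardware rings: with plain RSS the mapping is the identity (reg_idx = i), while in QoS mode each traffic class gets a block of rings whose reg_idx values interleave across classes. A standalone plain-C illustration of that interleaving, with hypothetical class and queue counts and a stride that is an assumption based on the q_idx/qos_pc pairing above:

    #include <stdio.h>

    int main(void)
    {
            const int num_pcs = 4;          /* traffic classes (hypothetical) */
            const int rss_i = 2;            /* RSS queues per class (hypothetical) */
            int pc, offset, i;

            for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
                    int q_idx = pc;

                    for (i = 0; i < rss_i; i++) {
                            printf("rx_ring[%d] -> reg_idx %d (qos_pc %d)\n",
                                   offset + i, q_idx, pc);
                            q_idx += num_pcs;       /* assumed stride */
                    }
            }
            return 0;
    }
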