Lines matching refs: rx_ring

685 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,  in i40e_fd_handle_status()  argument
688 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
1355 static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) in i40e_rx_bi() argument
1357 return &rx_ring->rx_bi[idx]; in i40e_rx_bi()
1367 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, in i40e_reuse_rx_page() argument
1371 u16 nta = rx_ring->next_to_alloc; in i40e_reuse_rx_page()
1373 new_buff = i40e_rx_bi(rx_ring, nta); in i40e_reuse_rx_page()
1377 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in i40e_reuse_rx_page()
1385 rx_ring->rx_stats.page_reuse_count++; in i40e_reuse_rx_page()
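
The i40e_reuse_rx_page() hits above show the recycling path: a page that can still be reused is parked in the slot at next_to_alloc, and the index wraps without a modulo. A minimal userspace sketch of that bookkeeping, with a simplified ring/buffer pair standing in for the driver's structures (field set reduced for illustration):

    #include <stdio.h>

    struct rx_buffer { void *page; unsigned int page_offset; };

    struct rx_ring {
        unsigned short count;          /* number of descriptors/buffers */
        unsigned short next_to_alloc;  /* slot the next recycled page lands in */
        struct rx_buffer bi[512];
        unsigned long page_reuse_count;
    };

    /* Mirrors the pattern in i40e_reuse_rx_page(): park the old buffer's page
     * in the slot at next_to_alloc, then wrap the index without a modulo. */
    static void reuse_rx_page(struct rx_ring *ring, const struct rx_buffer *old)
    {
        unsigned short nta = ring->next_to_alloc;

        ring->bi[nta] = *old;                       /* transfer page + offset */
        nta++;
        ring->next_to_alloc = (nta < ring->count) ? nta : 0;
        ring->page_reuse_count++;
    }

    int main(void)
    {
        struct rx_ring ring = { .count = 4, .next_to_alloc = 3 };
        struct rx_buffer old = { .page = (void *)0x1000, .page_offset = 0 };

        reuse_rx_page(&ring, &old);
        printf("next_to_alloc wrapped to %u, reuses = %lu\n",
               (unsigned)ring.next_to_alloc, ring.page_reuse_count);
        return 0;
    }
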
1403 void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, in i40e_clean_programming_status() argument
1412 i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id); in i40e_clean_programming_status()
1473 int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) in i40e_alloc_rx_bi() argument
1475 unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; in i40e_alloc_rx_bi()
1477 rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); in i40e_alloc_rx_bi()
1478 return rx_ring->rx_bi ? 0 : -ENOMEM; in i40e_alloc_rx_bi()
1481 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) in i40e_clear_rx_bi() argument
1483 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); in i40e_clear_rx_bi()
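
i40e_alloc_rx_bi() and i40e_clear_rx_bi() size everything from the ring's descriptor count and the element type. A small standalone analogue of that allocation and reset, with calloc()/memset() standing in for kzalloc() and the kernel error codes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct rx_buffer { void *page; unsigned int page_offset; };

    struct rx_ring {
        unsigned short count;
        struct rx_buffer *rx_bi;
    };

    /* Analogue of i40e_alloc_rx_bi(): one zeroed tracking element per
     * descriptor, sized from the element type rather than a constant. */
    static int alloc_rx_bi(struct rx_ring *ring)
    {
        ring->rx_bi = calloc(ring->count, sizeof(*ring->rx_bi));
        return ring->rx_bi ? 0 : -1;          /* the driver returns -ENOMEM */
    }

    /* Analogue of i40e_clear_rx_bi(): wipe the array in place between uses. */
    static void clear_rx_bi(struct rx_ring *ring)
    {
        memset(ring->rx_bi, 0, sizeof(*ring->rx_bi) * ring->count);
    }

    int main(void)
    {
        struct rx_ring ring = { .count = 512 };

        if (alloc_rx_bi(&ring))
            return 1;
        clear_rx_bi(&ring);
        printf("allocated %zu bytes for %u buffers\n",
               sizeof(*ring.rx_bi) * ring.count, (unsigned)ring.count);
        free(ring.rx_bi);
        return 0;
    }
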
1490 void i40e_clean_rx_ring(struct i40e_ring *rx_ring) in i40e_clean_rx_ring() argument
1495 if (!rx_ring->rx_bi) in i40e_clean_rx_ring()
1498 if (rx_ring->skb) { in i40e_clean_rx_ring()
1499 dev_kfree_skb(rx_ring->skb); in i40e_clean_rx_ring()
1500 rx_ring->skb = NULL; in i40e_clean_rx_ring()
1503 if (rx_ring->xsk_pool) { in i40e_clean_rx_ring()
1504 i40e_xsk_clean_rx_ring(rx_ring); in i40e_clean_rx_ring()
1509 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1510 struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i); in i40e_clean_rx_ring()
1518 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_ring()
1521 rx_ring->rx_buf_len, in i40e_clean_rx_ring()
1525 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in i40e_clean_rx_ring()
1526 i40e_rx_pg_size(rx_ring), in i40e_clean_rx_ring()
1537 if (rx_ring->xsk_pool) in i40e_clean_rx_ring()
1538 i40e_clear_rx_bi_zc(rx_ring); in i40e_clean_rx_ring()
1540 i40e_clear_rx_bi(rx_ring); in i40e_clean_rx_ring()
1543 memset(rx_ring->desc, 0, rx_ring->size); in i40e_clean_rx_ring()
1545 rx_ring->next_to_alloc = 0; in i40e_clean_rx_ring()
1546 rx_ring->next_to_clean = 0; in i40e_clean_rx_ring()
1547 rx_ring->next_to_use = 0; in i40e_clean_rx_ring()
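
Taken together, the i40e_clean_rx_ring() lines show the teardown order: free any half-built skb, release every page the ring still owns (each one synced for the CPU and DMA-unmapped first), clear the buffer array, zero the descriptor memory, and rewind the three ring indices. A simplified, runnable model of that sequence (free() stands in for the DMA unmap and page free; the xsk_pool zero-copy branch is omitted):

    #include <stdlib.h>
    #include <string.h>

    struct rx_buffer { void *page; };

    struct rx_ring {
        unsigned short count;
        struct rx_buffer *rx_bi;   /* software buffer array */
        void *desc;                /* descriptor memory */
        size_t size;               /* bytes of descriptor memory */
        unsigned short next_to_alloc, next_to_clean, next_to_use;
        void *skb;                 /* half-built packet carried between polls */
    };

    /* Model of i40e_clean_rx_ring(): drop any carried skb, release every page
     * the ring still owns (the driver syncs and DMA-unmaps each one first),
     * then zero the descriptors and rewind all three ring indices. */
    static void clean_rx_ring(struct rx_ring *ring)
    {
        unsigned short i;

        if (!ring->rx_bi)
            return;

        free(ring->skb);                      /* dev_kfree_skb() in the driver */
        ring->skb = NULL;

        for (i = 0; i < ring->count; i++) {
            free(ring->rx_bi[i].page);        /* unmap + page release */
            ring->rx_bi[i].page = NULL;
        }

        memset(ring->rx_bi, 0, sizeof(*ring->rx_bi) * ring->count);
        memset(ring->desc, 0, ring->size);

        ring->next_to_alloc = 0;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
    }

    int main(void)
    {
        struct rx_ring ring = { .count = 2, .size = 64 };

        ring.rx_bi = calloc(ring.count, sizeof(*ring.rx_bi));
        ring.desc = calloc(1, ring.size);
        ring.rx_bi[0].page = malloc(4096);    /* pretend one buffer is populated */

        clean_rx_ring(&ring);
        free(ring.rx_bi);
        free(ring.desc);
        return 0;
    }
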
1556 void i40e_free_rx_resources(struct i40e_ring *rx_ring) in i40e_free_rx_resources() argument
1558 i40e_clean_rx_ring(rx_ring); in i40e_free_rx_resources()
1559 if (rx_ring->vsi->type == I40E_VSI_MAIN) in i40e_free_rx_resources()
1560 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in i40e_free_rx_resources()
1561 rx_ring->xdp_prog = NULL; in i40e_free_rx_resources()
1562 kfree(rx_ring->rx_bi); in i40e_free_rx_resources()
1563 rx_ring->rx_bi = NULL; in i40e_free_rx_resources()
1565 if (rx_ring->desc) { in i40e_free_rx_resources()
1566 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40e_free_rx_resources()
1567 rx_ring->desc, rx_ring->dma); in i40e_free_rx_resources()
1568 rx_ring->desc = NULL; in i40e_free_rx_resources()
1578 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) in i40e_setup_rx_descriptors() argument
1580 struct device *dev = rx_ring->dev; in i40e_setup_rx_descriptors()
1583 u64_stats_init(&rx_ring->syncp); in i40e_setup_rx_descriptors()
1586 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); in i40e_setup_rx_descriptors()
1587 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40e_setup_rx_descriptors()
1588 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40e_setup_rx_descriptors()
1589 &rx_ring->dma, GFP_KERNEL); in i40e_setup_rx_descriptors()
1591 if (!rx_ring->desc) { in i40e_setup_rx_descriptors()
1593 rx_ring->size); in i40e_setup_rx_descriptors()
1597 rx_ring->next_to_alloc = 0; in i40e_setup_rx_descriptors()
1598 rx_ring->next_to_clean = 0; in i40e_setup_rx_descriptors()
1599 rx_ring->next_to_use = 0; in i40e_setup_rx_descriptors()
1602 if (rx_ring->vsi->type == I40E_VSI_MAIN) { in i40e_setup_rx_descriptors()
1603 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, in i40e_setup_rx_descriptors()
1604 rx_ring->queue_index, rx_ring->q_vector->napi.napi_id); in i40e_setup_rx_descriptors()
1609 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; in i40e_setup_rx_descriptors()
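
i40e_setup_rx_descriptors() sizes the descriptor area from the ring count and rounds it up to a 4 KiB boundary before the coherent DMA allocation. A tiny standalone demo of that sizing arithmetic; the 32-byte descriptor size is an assumption for illustration, since union i40e_rx_desc is defined in the driver headers:

    #include <stdio.h>
    #include <stddef.h>

    /* Same rounding the kernel's ALIGN() macro performs (power-of-two a). */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
        size_t desc_sz = 32;        /* assumed size of union i40e_rx_desc */
        unsigned int count = 512;   /* descriptors in the ring */
        size_t size = count * desc_sz;

        size = ALIGN_UP(size, 4096);   /* mirrors ALIGN(rx_ring->size, 4096) */
        printf("ring of %u descriptors -> %zu bytes of DMA memory\n",
               count, size);
        return 0;
    }
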
1619 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) in i40e_release_rx_desc() argument
1621 rx_ring->next_to_use = val; in i40e_release_rx_desc()
1624 rx_ring->next_to_alloc = val; in i40e_release_rx_desc()
1632 writel(val, rx_ring->tail); in i40e_release_rx_desc()
1635 static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring, in i40e_rx_frame_truesize() argument
1641 truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in i40e_rx_frame_truesize()
1643 truesize = rx_ring->rx_offset ? in i40e_rx_frame_truesize()
1644 SKB_DATA_ALIGN(size + rx_ring->rx_offset) + in i40e_rx_frame_truesize()
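
i40e_rx_frame_truesize() has two shapes: on systems with pages smaller than 8 KiB the truesize is simply half the Rx page, otherwise it is derived from the frame size plus the ring's Rx headroom and the skb_shared_info tail. A standalone sketch of that arithmetic; CACHE_ALIGN and the 320-byte shared-info size are stand-ins for SKB_DATA_ALIGN() and sizeof(struct skb_shared_info):

    #include <stdio.h>
    #include <stddef.h>

    #define CACHE_ALIGN(x)   (((x) + 63) & ~(size_t)63)   /* stand-in for SKB_DATA_ALIGN */
    #define SHARED_INFO_SZ   320                          /* assumed skb_shared_info size */

    /* Model of i40e_rx_frame_truesize(): half a page when pages are small,
     * otherwise sized from the frame length plus the ring's Rx headroom. */
    static size_t rx_frame_truesize(size_t page_size, size_t rx_pg_size,
                                    size_t rx_offset, size_t frame_size)
    {
        if (page_size < 8192)
            return rx_pg_size / 2;              /* must stay a power of two */

        return rx_offset ?
               CACHE_ALIGN(frame_size + rx_offset) + CACHE_ALIGN(SHARED_INFO_SZ) :
               CACHE_ALIGN(frame_size);
    }

    int main(void)
    {
        printf("4K pages : truesize = %zu\n",
               rx_frame_truesize(4096, 4096, 0, 1536));
        printf("64K pages: truesize = %zu\n",
               rx_frame_truesize(65536, 65536, 256, 1536));
        return 0;
    }
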
1659 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, in i40e_alloc_mapped_page() argument
1667 rx_ring->rx_stats.page_reuse_count++; in i40e_alloc_mapped_page()
1672 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); in i40e_alloc_mapped_page()
1674 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1679 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in i40e_alloc_mapped_page()
1680 i40e_rx_pg_size(rx_ring), in i40e_alloc_mapped_page()
1687 if (dma_mapping_error(rx_ring->dev, dma)) { in i40e_alloc_mapped_page()
1688 __free_pages(page, i40e_rx_pg_order(rx_ring)); in i40e_alloc_mapped_page()
1689 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1695 bi->page_offset = rx_ring->rx_offset; in i40e_alloc_mapped_page()
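
i40e_alloc_mapped_page() keeps an already-attached page when it can, and only otherwise allocates and DMA-maps a new one, bumping the failure counter so the next poll retries. A simplified userspace model of that reuse-or-allocate decision (malloc() stands in for dev_alloc_pages(); the DMA mapping and its error path are only noted in a comment):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct rx_buffer {
        void *page;
        size_t page_offset;
    };

    struct rx_ring {
        size_t rx_offset;                 /* headroom reserved in each buffer */
        unsigned long page_reuse_count;
        unsigned long alloc_page_failed;
    };

    /* Model of i40e_alloc_mapped_page(): keep an already-attached page if the
     * buffer still has one, otherwise allocate (and, in the driver, DMA-map)
     * a fresh page; count failures so the next poll can retry. */
    static bool alloc_mapped_page(struct rx_ring *ring, struct rx_buffer *bi)
    {
        if (bi->page) {                       /* hot path: page recycled earlier */
            ring->page_reuse_count++;
            return true;
        }

        bi->page = malloc(4096);              /* dev_alloc_pages() in the driver */
        if (!bi->page) {
            ring->alloc_page_failed++;        /* rx_stats.alloc_page_failed++ */
            return false;
        }

        /* The driver additionally dma_map_page_attrs()s the page here and
         * frees it again if the mapping reports an error. */
        bi->page_offset = ring->rx_offset;
        return true;
    }

    int main(void)
    {
        struct rx_ring ring = { .rx_offset = 256 };
        struct rx_buffer bi = { 0 };

        printf("first alloc : %s\n", alloc_mapped_page(&ring, &bi) ? "ok" : "fail");
        printf("second call : %s (reuses = %lu)\n",
               alloc_mapped_page(&ring, &bi) ? "ok" : "fail",
               ring.page_reuse_count);
        free(bi.page);
        return 0;
    }
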
1709 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) in i40e_alloc_rx_buffers() argument
1711 u16 ntu = rx_ring->next_to_use; in i40e_alloc_rx_buffers()
1716 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers()
1719 rx_desc = I40E_RX_DESC(rx_ring, ntu); in i40e_alloc_rx_buffers()
1720 bi = i40e_rx_bi(rx_ring, ntu); in i40e_alloc_rx_buffers()
1723 if (!i40e_alloc_mapped_page(rx_ring, bi)) in i40e_alloc_rx_buffers()
1727 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in i40e_alloc_rx_buffers()
1729 rx_ring->rx_buf_len, in i40e_alloc_rx_buffers()
1740 if (unlikely(ntu == rx_ring->count)) { in i40e_alloc_rx_buffers()
1741 rx_desc = I40E_RX_DESC(rx_ring, 0); in i40e_alloc_rx_buffers()
1742 bi = i40e_rx_bi(rx_ring, 0); in i40e_alloc_rx_buffers()
1752 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1753 i40e_release_rx_desc(rx_ring, ntu); in i40e_alloc_rx_buffers()
1758 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1759 i40e_release_rx_desc(rx_ring, ntu); in i40e_alloc_rx_buffers()
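
The i40e_alloc_rx_buffers() lines sketch the refill loop: walk forward from next_to_use, publish one buffer per descriptor, wrap at the ring count, and only write the tail register when something was actually refilled. A small runnable model of that bookkeeping, with a fake descriptor array and a printf standing in for the MMIO tail write done by i40e_release_rx_desc():

    #include <stdio.h>
    #include <stdbool.h>

    #define RING_COUNT 8

    struct rx_ring {
        unsigned short count;
        unsigned short next_to_use;
        unsigned long long desc_addr[RING_COUNT];   /* stands in for the DMA descriptors */
    };

    /* Stands in for i40e_release_rx_desc(): record the new producer index
     * and (in the driver) write it to the hardware tail register. */
    static void release_rx_desc(struct rx_ring *ring, unsigned short val)
    {
        ring->next_to_use = val;
        printf("tail bumped to %u\n", (unsigned)val);
    }

    /* Model of the i40e_alloc_rx_buffers() bookkeeping: publish one buffer
     * address per descriptor, wrap at count without a modulo, and only touch
     * the (expensive) tail register if anything was actually refilled. */
    static bool alloc_rx_buffers(struct rx_ring *ring, unsigned short cleaned_count)
    {
        unsigned short ntu = ring->next_to_use;

        if (!cleaned_count)
            return false;

        do {
            ring->desc_addr[ntu] = 0x1000ULL * (ntu + 1);  /* fake buffer DMA address */

            ntu++;
            if (ntu == ring->count)
                ntu = 0;

            cleaned_count--;
        } while (cleaned_count);

        if (ring->next_to_use != ntu)
            release_rx_desc(ring, ntu);

        return false;   /* the driver returns true only when it runs out of pages */
    }

    int main(void)
    {
        struct rx_ring ring = { .count = RING_COUNT, .next_to_use = 6 };

        alloc_rx_buffers(&ring, 4);   /* refills slots 6,7,0,1 and bumps tail to 2 */
        return 0;
    }
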
1917 void i40e_process_skb_fields(struct i40e_ring *rx_ring, in i40e_process_skb_fields() argument
1930 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); in i40e_process_skb_fields()
1932 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); in i40e_process_skb_fields()
1934 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); in i40e_process_skb_fields()
1936 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_process_skb_fields()
1946 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_process_skb_fields()
1960 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, in i40e_cleanup_headers() argument
2037 static void i40e_add_rx_frag(struct i40e_ring *rx_ring, in i40e_add_rx_frag() argument
2043 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; in i40e_add_rx_frag()
2045 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); in i40e_add_rx_frag()
2068 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, in i40e_get_rx_buffer() argument
2074 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_get_rx_buffer()
2084 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_get_rx_buffer()
2106 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, in i40e_construct_skb() argument
2112 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; in i40e_construct_skb()
2139 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, in i40e_construct_skb()
2185 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, in i40e_build_skb() argument
2191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; in i40e_build_skb()
2236 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, in i40e_put_rx_buffer() argument
2242 i40e_reuse_rx_page(rx_ring, rx_buffer); in i40e_put_rx_buffer()
2245 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in i40e_put_rx_buffer()
2246 i40e_rx_pg_size(rx_ring), in i40e_put_rx_buffer()
2263 static bool i40e_is_non_eop(struct i40e_ring *rx_ring, in i40e_is_non_eop() argument
2271 rx_ring->rx_stats.non_eop_descs++; in i40e_is_non_eop()
2294 static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) in i40e_run_xdp() argument
2301 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in i40e_run_xdp()
2313 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp()
2319 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp()
2329 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
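
i40e_run_xdp() reads the attached program with READ_ONCE() and then dispatches on its verdict: pass the frame to the stack, transmit it on the paired XDP Tx ring, redirect it, or drop it, with unknown or aborted actions traced and treated as drops. A userspace sketch of that dispatch, using hypothetical enums in place of the kernel's XDP_* action codes and the driver's result values:

    #include <stdio.h>

    /* Stand-ins for the kernel's XDP_* action codes and the driver's results. */
    enum xdp_verdict { VERDICT_PASS, VERDICT_TX, VERDICT_REDIRECT, VERDICT_DROP, VERDICT_BOGUS };
    enum rx_result   { RX_PASS, RX_TX, RX_REDIR, RX_CONSUMED };

    /* Model of the switch in i40e_run_xdp(): unknown actions fall through to
     * the aborted/drop handling, which is also where the driver traces the
     * exception before recycling the buffer. */
    static enum rx_result run_xdp(enum xdp_verdict act)
    {
        switch (act) {
        case VERDICT_PASS:
            return RX_PASS;                 /* build an skb, hand to the stack  */
        case VERDICT_TX:
            return RX_TX;                   /* queue on vsi->xdp_rings[queue]   */
        case VERDICT_REDIRECT:
            return RX_REDIR;                /* xdp_do_redirect() in the driver  */
        default:
            /* trace_xdp_exception() here, then fall through to drop */
        case VERDICT_DROP:
            return RX_CONSUMED;             /* recycle the buffer, no skb       */
        }
    }

    int main(void)
    {
        printf("bogus action -> %d (consumed)\n", run_xdp(VERDICT_BOGUS));
        return 0;
    }
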
2345 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, in i40e_rx_buffer_flip() argument
2349 unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size); in i40e_rx_buffer_flip()
2381 void i40e_update_rx_stats(struct i40e_ring *rx_ring, in i40e_update_rx_stats() argument
2385 u64_stats_update_begin(&rx_ring->syncp); in i40e_update_rx_stats()
2386 rx_ring->stats.packets += total_rx_packets; in i40e_update_rx_stats()
2387 rx_ring->stats.bytes += total_rx_bytes; in i40e_update_rx_stats()
2388 u64_stats_update_end(&rx_ring->syncp); in i40e_update_rx_stats()
2389 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_update_rx_stats()
2390 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_update_rx_stats()
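
i40e_update_rx_stats() folds the totals gathered during one napi poll into the ring counters in a single step, inside a u64_stats section so 32-bit readers see consistent 64-bit values, and also feeds the queue vector's running totals. A userspace sketch of the batching pattern (the seqcount protection itself is kernel-specific and only noted in a comment):

    #include <stdio.h>

    struct ring_stats { unsigned long long packets, bytes; };

    struct rx_ring {
        struct ring_stats stats;          /* per-ring counters (u64_stats in the driver) */
        struct ring_stats vector_totals;  /* q_vector->rx running totals */
    };

    /* Model of i40e_update_rx_stats(): one write per poll instead of one per
     * packet; in the kernel this sits between u64_stats_update_begin()/end(). */
    static void update_rx_stats(struct rx_ring *ring,
                                unsigned long long total_bytes,
                                unsigned long long total_packets)
    {
        ring->stats.packets += total_packets;
        ring->stats.bytes += total_bytes;
        ring->vector_totals.packets += total_packets;
        ring->vector_totals.bytes += total_bytes;
    }

    int main(void)
    {
        struct rx_ring ring = { .stats = { 0, 0 } };

        /* Totals accumulated across one napi poll, then folded in once. */
        update_rx_stats(&ring, 64 * 1500ULL, 64);
        printf("%llu packets / %llu bytes\n", ring.stats.packets, ring.stats.bytes);
        return 0;
    }
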
2402 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) in i40e_finalize_xdp_rx() argument
2409 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_finalize_xdp_rx()
2419 static void i40e_inc_ntc(struct i40e_ring *rx_ring) in i40e_inc_ntc() argument
2421 u32 ntc = rx_ring->next_to_clean + 1; in i40e_inc_ntc()
2423 ntc = (ntc < rx_ring->count) ? ntc : 0; in i40e_inc_ntc()
2424 rx_ring->next_to_clean = ntc; in i40e_inc_ntc()
2425 prefetch(I40E_RX_DESC(rx_ring, ntc)); in i40e_inc_ntc()
2440 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq() argument
2443 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq()
2444 unsigned int offset = rx_ring->rx_offset; in i40e_clean_rx_irq()
2445 struct sk_buff *skb = rx_ring->skb; in i40e_clean_rx_irq()
2452 frame_sz = i40e_rx_frame_truesize(rx_ring, 0); in i40e_clean_rx_irq()
2454 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in i40e_clean_rx_irq()
2466 i40e_alloc_rx_buffers(rx_ring, cleaned_count); in i40e_clean_rx_irq()
2470 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); in i40e_clean_rx_irq()
2486 i40e_clean_programming_status(rx_ring, in i40e_clean_rx_irq()
2489 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_clean_rx_irq()
2490 i40e_inc_ntc(rx_ring); in i40e_clean_rx_irq()
2491 i40e_reuse_rx_page(rx_ring, rx_buffer); in i40e_clean_rx_irq()
2501 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); in i40e_clean_rx_irq()
2502 rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); in i40e_clean_rx_irq()
2513 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size); in i40e_clean_rx_irq()
2515 xdp_res = i40e_run_xdp(rx_ring, &xdp); in i40e_clean_rx_irq()
2521 i40e_rx_buffer_flip(rx_ring, rx_buffer, size); in i40e_clean_rx_irq()
2528 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); in i40e_clean_rx_irq()
2529 } else if (ring_uses_build_skb(rx_ring)) { in i40e_clean_rx_irq()
2530 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp); in i40e_clean_rx_irq()
2532 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp); in i40e_clean_rx_irq()
2537 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq()
2542 i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); in i40e_clean_rx_irq()
2545 i40e_inc_ntc(rx_ring); in i40e_clean_rx_irq()
2546 if (i40e_is_non_eop(rx_ring, rx_desc)) in i40e_clean_rx_irq()
2549 if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) { in i40e_clean_rx_irq()
2558 i40e_process_skb_fields(rx_ring, rx_desc, skb); in i40e_clean_rx_irq()
2560 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); in i40e_clean_rx_irq()
2561 napi_gro_receive(&rx_ring->q_vector->napi, skb); in i40e_clean_rx_irq()
2568 i40e_finalize_xdp_rx(rx_ring, xdp_xmit); in i40e_clean_rx_irq()
2569 rx_ring->skb = skb; in i40e_clean_rx_irq()
2571 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); in i40e_clean_rx_irq()
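
Finally, the i40e_clean_rx_irq() hits outline the poll loop itself: refill buffers in batches, stop when the napi budget is spent or no descriptor has been written back, advance next_to_clean per descriptor, and only count a packet once its end-of-packet descriptor arrives. A heavily simplified, runnable skeleton of that control flow; DMA, skb construction, XDP and the programming-status path are all elided:

    #include <stdio.h>
    #include <stdbool.h>

    #define RING_COUNT   8
    #define REFILL_BATCH 4   /* stands in for the driver's periodic refill threshold */

    struct rx_desc { unsigned int size; bool ready; bool eop; };

    struct rx_ring {
        struct rx_desc desc[RING_COUNT];
        unsigned short next_to_clean;
        unsigned long long bytes, packets;
    };

    /* Skeleton of the i40e_clean_rx_irq() control flow: consume descriptors
     * until the napi budget is spent or the hardware has nothing ready,
     * refill buffers in batches, and only count a packet on its EOP descriptor. */
    static int clean_rx_irq(struct rx_ring *ring, int budget)
    {
        int total_packets = 0;
        unsigned int cleaned = 0;

        while (total_packets < budget) {
            struct rx_desc *rxd = &ring->desc[ring->next_to_clean];

            if (cleaned >= REFILL_BATCH)    /* i40e_alloc_rx_buffers() in the driver */
                cleaned = 0;

            if (!rxd->ready)                /* descriptor not written back yet */
                break;

            ring->next_to_clean++;          /* i40e_inc_ntc(): wrap without '%' */
            if (ring->next_to_clean == RING_COUNT)
                ring->next_to_clean = 0;
            cleaned++;
            rxd->ready = false;

            ring->bytes += rxd->size;
            if (!rxd->eop)                  /* i40e_is_non_eop(): keep chaining frags */
                continue;

            ring->packets++;                /* driver hands the skb to napi_gro_receive() */
            total_packets++;
        }

        return total_packets;               /* work done, reported back to napi */
    }

    int main(void)
    {
        static struct rx_ring ring;         /* zero-initialized */
        int i;

        for (i = 0; i < 5; i++) {           /* pretend hardware completed 5 frames */
            ring.desc[i].size = 1500;
            ring.desc[i].ready = true;
            ring.desc[i].eop = true;
        }
        printf("cleaned %d packets\n", clean_rx_irq(&ring, 64));
        return 0;
    }
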