Lines matching refs:rx_ring

531 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, in i40e_fd_handle_status() argument
534 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
1199 static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) in i40e_rx_bi() argument
1201 return &rx_ring->rx_bi[idx]; in i40e_rx_bi()
1211 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, in i40e_reuse_rx_page() argument
1215 u16 nta = rx_ring->next_to_alloc; in i40e_reuse_rx_page()
1217 new_buff = i40e_rx_bi(rx_ring, nta); in i40e_reuse_rx_page()
1221 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in i40e_reuse_rx_page()
1229 rx_ring->rx_stats.page_reuse_count++; in i40e_reuse_rx_page()
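
Lines 1211-1229 are the page-recycling path: instead of freeing an Rx page that can still be reused, the driver parks it in the slot indexed by next_to_alloc. A minimal sketch of that pattern follows; the i40e_rx_buffer fields other than page_offset are assumptions, since they are not visible in this match list.

/* Sketch only: recycle a still-mapped Rx page into the next_to_alloc slot
 * and advance that index with wrap-around. Buffer field names other than
 * page_offset are assumptions about struct i40e_rx_buffer. */
static void reuse_rx_page_sketch(struct i40e_ring *rx_ring,
				 struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = i40e_rx_bi(rx_ring, nta);

	/* advance next_to_alloc, wrapping at the ring size */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* hand the DMA mapping and page over instead of unmapping/freeing */
	new_buff->dma          = old_buff->dma;
	new_buff->page         = old_buff->page;
	new_buff->page_offset  = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;

	rx_ring->rx_stats.page_reuse_count++;
}
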
1247 void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, in i40e_clean_programming_status() argument
1256 i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id); in i40e_clean_programming_status()
1308 int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) in i40e_alloc_rx_bi() argument
1310 unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; in i40e_alloc_rx_bi()
1312 rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); in i40e_alloc_rx_bi()
1313 return rx_ring->rx_bi ? 0 : -ENOMEM; in i40e_alloc_rx_bi()
1316 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) in i40e_clear_rx_bi() argument
1318 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); in i40e_clear_rx_bi()
1325 void i40e_clean_rx_ring(struct i40e_ring *rx_ring) in i40e_clean_rx_ring() argument
1330 if (!rx_ring->rx_bi) in i40e_clean_rx_ring()
1333 if (rx_ring->skb) { in i40e_clean_rx_ring()
1334 dev_kfree_skb(rx_ring->skb); in i40e_clean_rx_ring()
1335 rx_ring->skb = NULL; in i40e_clean_rx_ring()
1338 if (rx_ring->xsk_pool) { in i40e_clean_rx_ring()
1339 i40e_xsk_clean_rx_ring(rx_ring); in i40e_clean_rx_ring()
1344 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1345 struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i); in i40e_clean_rx_ring()
1353 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_ring()
1356 rx_ring->rx_buf_len, in i40e_clean_rx_ring()
1360 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in i40e_clean_rx_ring()
1361 i40e_rx_pg_size(rx_ring), in i40e_clean_rx_ring()
1372 if (rx_ring->xsk_pool) in i40e_clean_rx_ring()
1373 i40e_clear_rx_bi_zc(rx_ring); in i40e_clean_rx_ring()
1375 i40e_clear_rx_bi(rx_ring); in i40e_clean_rx_ring()
1378 memset(rx_ring->desc, 0, rx_ring->size); in i40e_clean_rx_ring()
1380 rx_ring->next_to_alloc = 0; in i40e_clean_rx_ring()
1381 rx_ring->next_to_clean = 0; in i40e_clean_rx_ring()
1382 rx_ring->next_to_use = 0; in i40e_clean_rx_ring()
1391 void i40e_free_rx_resources(struct i40e_ring *rx_ring) in i40e_free_rx_resources() argument
1393 i40e_clean_rx_ring(rx_ring); in i40e_free_rx_resources()
1394 if (rx_ring->vsi->type == I40E_VSI_MAIN) in i40e_free_rx_resources()
1395 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in i40e_free_rx_resources()
1396 rx_ring->xdp_prog = NULL; in i40e_free_rx_resources()
1397 kfree(rx_ring->rx_bi); in i40e_free_rx_resources()
1398 rx_ring->rx_bi = NULL; in i40e_free_rx_resources()
1400 if (rx_ring->desc) { in i40e_free_rx_resources()
1401 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40e_free_rx_resources()
1402 rx_ring->desc, rx_ring->dma); in i40e_free_rx_resources()
1403 rx_ring->desc = NULL; in i40e_free_rx_resources()
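
Stitching the i40e_free_rx_resources matches (lines 1391-1403) back together gives the teardown order: drain the ring, drop XDP state, free the software buffer array, then release the DMA-coherent descriptor area. A sketch built only from the lines above:

void free_rx_resources_sketch(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;

	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	/* descriptor memory goes last, after no software state refers to it */
	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
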
1413 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) in i40e_setup_rx_descriptors() argument
1415 struct device *dev = rx_ring->dev; in i40e_setup_rx_descriptors()
1418 u64_stats_init(&rx_ring->syncp); in i40e_setup_rx_descriptors()
1421 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); in i40e_setup_rx_descriptors()
1422 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40e_setup_rx_descriptors()
1423 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40e_setup_rx_descriptors()
1424 &rx_ring->dma, GFP_KERNEL); in i40e_setup_rx_descriptors()
1426 if (!rx_ring->desc) { in i40e_setup_rx_descriptors()
1428 rx_ring->size); in i40e_setup_rx_descriptors()
1432 rx_ring->next_to_alloc = 0; in i40e_setup_rx_descriptors()
1433 rx_ring->next_to_clean = 0; in i40e_setup_rx_descriptors()
1434 rx_ring->next_to_use = 0; in i40e_setup_rx_descriptors()
1437 if (rx_ring->vsi->type == I40E_VSI_MAIN) { in i40e_setup_rx_descriptors()
1438 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, in i40e_setup_rx_descriptors()
1439 rx_ring->queue_index); in i40e_setup_rx_descriptors()
1444 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; in i40e_setup_rx_descriptors()
1454 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) in i40e_release_rx_desc() argument
1456 rx_ring->next_to_use = val; in i40e_release_rx_desc()
1459 rx_ring->next_to_alloc = val; in i40e_release_rx_desc()
1467 writel(val, rx_ring->tail); in i40e_release_rx_desc()
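
The i40e_release_rx_desc matches (lines 1454-1467) are the doorbell: software publishes the new next_to_use by writing the tail register. A sketch of that pattern; the write barrier between descriptor writes and the tail write does not appear in this match list and is added here as the usual assumption.

void release_rx_desc_sketch(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* next_to_alloc follows next_to_use once the ring has been refilled */
	rx_ring->next_to_alloc = val;

	/* order descriptor writes before the doorbell write on weakly ordered
	 * CPUs (assumed; the barrier is not visible in the match list) */
	wmb();
	writel(val, rx_ring->tail);
}
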
1476 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) in i40e_rx_offset() argument
1478 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; in i40e_rx_offset()
1481 static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring, in i40e_rx_frame_truesize() argument
1487 truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in i40e_rx_frame_truesize()
1489 truesize = i40e_rx_offset(rx_ring) ? in i40e_rx_frame_truesize()
1490 SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) + in i40e_rx_frame_truesize()
1505 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, in i40e_alloc_mapped_page() argument
1513 rx_ring->rx_stats.page_reuse_count++; in i40e_alloc_mapped_page()
1518 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); in i40e_alloc_mapped_page()
1520 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1525 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in i40e_alloc_mapped_page()
1526 i40e_rx_pg_size(rx_ring), in i40e_alloc_mapped_page()
1533 if (dma_mapping_error(rx_ring->dev, dma)) { in i40e_alloc_mapped_page()
1534 __free_pages(page, i40e_rx_pg_order(rx_ring)); in i40e_alloc_mapped_page()
1535 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1541 bi->page_offset = i40e_rx_offset(rx_ring); in i40e_alloc_mapped_page()
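
i40e_alloc_mapped_page (lines 1505-1541) allocates a page of the ring's page order, maps it for DMA, and backs out cleanly if the mapping fails. A condensed sketch; the dma_map_page_attrs() attributes argument and the exact i40e_rx_buffer fields are assumptions.

static bool alloc_mapped_page_sketch(struct i40e_ring *rx_ring,
				     struct i40e_rx_buffer *bi)
{
	struct page *page;
	dma_addr_t dma;

	/* a recycled page may already be parked in this slot */
	if (likely(bi->page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(rx_ring->dev, dma)) {
		/* undo the allocation if the device cannot reach the page */
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = i40e_rx_offset(rx_ring);
	return true;
}
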
1555 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) in i40e_alloc_rx_buffers() argument
1557 u16 ntu = rx_ring->next_to_use; in i40e_alloc_rx_buffers()
1562 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers()
1565 rx_desc = I40E_RX_DESC(rx_ring, ntu); in i40e_alloc_rx_buffers()
1566 bi = i40e_rx_bi(rx_ring, ntu); in i40e_alloc_rx_buffers()
1569 if (!i40e_alloc_mapped_page(rx_ring, bi)) in i40e_alloc_rx_buffers()
1573 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in i40e_alloc_rx_buffers()
1575 rx_ring->rx_buf_len, in i40e_alloc_rx_buffers()
1586 if (unlikely(ntu == rx_ring->count)) { in i40e_alloc_rx_buffers()
1587 rx_desc = I40E_RX_DESC(rx_ring, 0); in i40e_alloc_rx_buffers()
1588 bi = i40e_rx_bi(rx_ring, 0); in i40e_alloc_rx_buffers()
1598 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1599 i40e_release_rx_desc(rx_ring, ntu); in i40e_alloc_rx_buffers()
1604 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1605 i40e_release_rx_desc(rx_ring, ntu); in i40e_alloc_rx_buffers()
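
The refill loop in i40e_alloc_rx_buffers (lines 1555-1605) walks the descriptor and buffer arrays in lockstep and wraps all three cursors at rx_ring->count; the tail is bumped once for the whole batch rather than per buffer. A condensed sketch; the descriptor field name (read.pkt_addr) is an assumption based on this driver family.

static bool alloc_rx_buffers_sketch(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	bool failure = false;

	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi)) {
			failure = true;
			break;
		}

		/* hand the buffer back to the device before advertising it */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* descriptor address field assumed (read.pkt_addr) */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			/* wrap every cursor together */
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}
	} while (--cleaned_count);

	/* one doorbell write for the whole batch */
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* true asks the caller to schedule another refill attempt */
	return failure;
}
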
1763 void i40e_process_skb_fields(struct i40e_ring *rx_ring, in i40e_process_skb_fields() argument
1776 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); in i40e_process_skb_fields()
1778 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); in i40e_process_skb_fields()
1780 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); in i40e_process_skb_fields()
1782 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_process_skb_fields()
1792 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_process_skb_fields()
1809 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, in i40e_cleanup_headers() argument
1921 static void i40e_add_rx_frag(struct i40e_ring *rx_ring, in i40e_add_rx_frag() argument
1927 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; in i40e_add_rx_frag()
1929 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); in i40e_add_rx_frag()
1952 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, in i40e_get_rx_buffer() argument
1958 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_get_rx_buffer()
1968 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_get_rx_buffer()
1990 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, in i40e_construct_skb() argument
1996 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; in i40e_construct_skb()
2023 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, in i40e_construct_skb()
2069 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, in i40e_build_skb() argument
2075 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; in i40e_build_skb()
2120 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, in i40e_put_rx_buffer() argument
2126 i40e_reuse_rx_page(rx_ring, rx_buffer); in i40e_put_rx_buffer()
2129 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in i40e_put_rx_buffer()
2130 i40e_rx_pg_size(rx_ring), in i40e_put_rx_buffer()
2150 static bool i40e_is_non_eop(struct i40e_ring *rx_ring, in i40e_is_non_eop() argument
2154 u32 ntc = rx_ring->next_to_clean + 1; in i40e_is_non_eop()
2157 ntc = (ntc < rx_ring->count) ? ntc : 0; in i40e_is_non_eop()
2158 rx_ring->next_to_clean = ntc; in i40e_is_non_eop()
2160 prefetch(I40E_RX_DESC(rx_ring, ntc)); in i40e_is_non_eop()
2167 rx_ring->rx_stats.non_eop_descs++; in i40e_is_non_eop()
2190 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, in i40e_run_xdp() argument
2199 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in i40e_run_xdp()
2211 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp()
2215 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp()
2222 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
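
i40e_run_xdp (lines 2190-2222) reads the attached program with READ_ONCE() and acts on its verdict; the XDP_TX case, which queues onto the paired ring at vsi->xdp_rings[queue_index] (line 2211), is left out of the sketch below. The boolean return convention and the bpf_warn_invalid_xdp_action() signature follow older kernels and are assumptions.

/* Sketch: returns true when the frame was consumed by XDP and must not be
 * handed to the stack. XDP_TX handling is omitted. */
static bool run_xdp_sketch(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	u32 act;

	if (!xdp_prog)
		return false;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
			trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);	/* pre-5.17 signature assumed */
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return true;
	}
}
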
2239 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, in i40e_rx_buffer_flip() argument
2243 unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size); in i40e_rx_buffer_flip()
2275 void i40e_update_rx_stats(struct i40e_ring *rx_ring, in i40e_update_rx_stats() argument
2279 u64_stats_update_begin(&rx_ring->syncp); in i40e_update_rx_stats()
2280 rx_ring->stats.packets += total_rx_packets; in i40e_update_rx_stats()
2281 rx_ring->stats.bytes += total_rx_bytes; in i40e_update_rx_stats()
2282 u64_stats_update_end(&rx_ring->syncp); in i40e_update_rx_stats()
2283 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_update_rx_stats()
2284 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_update_rx_stats()
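
i40e_update_rx_stats (lines 2275-2284) folds the per-poll totals into the ring counters inside a u64_stats update section, so the 64-bit counters read consistently on 32-bit CPUs. For reference, the matching reader-side pattern (a sketch; the stats field names mirror the lines above):

static void read_rx_stats_sketch(struct i40e_ring *rx_ring,
				 u64 *packets, u64 *bytes)
{
	unsigned int start;

	/* retry until a snapshot is taken with no writer in between */
	do {
		start = u64_stats_fetch_begin(&rx_ring->syncp);
		*packets = rx_ring->stats.packets;
		*bytes   = rx_ring->stats.bytes;
	} while (u64_stats_fetch_retry(&rx_ring->syncp, start));
}
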
2296 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) in i40e_finalize_xdp_rx() argument
2303 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_finalize_xdp_rx()
2313 static void i40e_inc_ntc(struct i40e_ring *rx_ring) in i40e_inc_ntc() argument
2315 u32 ntc = rx_ring->next_to_clean + 1; in i40e_inc_ntc()
2317 ntc = (ntc < rx_ring->count) ? ntc : 0; in i40e_inc_ntc()
2318 rx_ring->next_to_clean = ntc; in i40e_inc_ntc()
2319 prefetch(I40E_RX_DESC(rx_ring, ntc)); in i40e_inc_ntc()
2334 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) in i40e_clean_rx_irq() argument
2337 struct sk_buff *skb = rx_ring->skb; in i40e_clean_rx_irq()
2338 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); in i40e_clean_rx_irq()
2344 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0); in i40e_clean_rx_irq()
2346 xdp.rxq = &rx_ring->xdp_rxq; in i40e_clean_rx_irq()
2358 i40e_alloc_rx_buffers(rx_ring, cleaned_count); in i40e_clean_rx_irq()
2362 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); in i40e_clean_rx_irq()
2378 i40e_clean_programming_status(rx_ring, in i40e_clean_rx_irq()
2381 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_clean_rx_irq()
2382 i40e_inc_ntc(rx_ring); in i40e_clean_rx_irq()
2383 i40e_reuse_rx_page(rx_ring, rx_buffer); in i40e_clean_rx_irq()
2393 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); in i40e_clean_rx_irq()
2394 rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); in i40e_clean_rx_irq()
2402 i40e_rx_offset(rx_ring); in i40e_clean_rx_irq()
2406 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size); in i40e_clean_rx_irq()
2408 skb = i40e_run_xdp(rx_ring, &xdp); in i40e_clean_rx_irq()
2416 i40e_rx_buffer_flip(rx_ring, rx_buffer, size); in i40e_clean_rx_irq()
2423 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); in i40e_clean_rx_irq()
2424 } else if (ring_uses_build_skb(rx_ring)) { in i40e_clean_rx_irq()
2425 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp); in i40e_clean_rx_irq()
2427 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp); in i40e_clean_rx_irq()
2432 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq()
2437 i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); in i40e_clean_rx_irq()
2440 if (i40e_is_non_eop(rx_ring, rx_desc, skb)) in i40e_clean_rx_irq()
2443 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) { in i40e_clean_rx_irq()
2452 i40e_process_skb_fields(rx_ring, rx_desc, skb); in i40e_clean_rx_irq()
2454 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); in i40e_clean_rx_irq()
2455 napi_gro_receive(&rx_ring->q_vector->napi, skb); in i40e_clean_rx_irq()
2462 i40e_finalize_xdp_rx(rx_ring, xdp_xmit); in i40e_clean_rx_irq()
2463 rx_ring->skb = skb; in i40e_clean_rx_irq()
2465 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); in i40e_clean_rx_irq()
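
Taken together, the i40e_clean_rx_irq matches (lines 2334-2465) outline the NAPI poll path: batch refill, fetch the write-back descriptor at next_to_clean, build or extend an skb, and hand completed frames to GRO. A compressed sketch of that flow; the descriptor length decode is folded into a hypothetical rx_desc_size() helper, and the XDP verdict and programming-status branches are omitted.

static int clean_rx_irq_sketch(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct sk_buff *skb = rx_ring->skb;
	struct xdp_buff xdp;

	xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *rx_buffer;
		union i40e_rx_desc *rx_desc;
		int rx_buffer_pgcnt;
		unsigned int size;

		/* refill the ring in batches, not one descriptor at a time */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = rx_desc_size(rx_desc);	/* hypothetical: DD bit + length */
		if (!size)
			break;

		rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);

		if (skb) {
			/* continuation of a multi-descriptor frame */
			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
		} else {
			/* point the xdp_buff at the packet (buffer fields assumed) */
			xdp.data = page_address(rx_buffer->page) +
				   rx_buffer->page_offset;
			xdp.data_meta = xdp.data;
			xdp.data_hard_start = xdp.data - i40e_rx_offset(rx_ring);
			xdp.data_end = xdp.data + size;
			xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);

			skb = ring_uses_build_skb(rx_ring) ?
			      i40e_build_skb(rx_ring, rx_buffer, &xdp) :
			      i40e_construct_skb(rx_ring, rx_buffer, &xdp);
		}

		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;	/* more descriptors belong to this frame */

		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
			skb = NULL;	/* frame dropped; keep cleaning */
			continue;
		}

		total_rx_bytes += skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		skb = NULL;
		total_rx_packets++;
	}

	rx_ring->skb = skb;	/* carry a partial frame into the next poll */
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	return total_rx_packets;
}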