Lines matching refs: rx_ring

73 				 struct ena_ring *rx_ring);
75 struct ena_ring *rx_ring);
105 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
335 static int ena_xdp_execute(struct ena_ring *rx_ring, in ena_xdp_execute() argument
344 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); in ena_xdp_execute()
352 ena_xdp_xmit_buff(rx_ring->netdev, in ena_xdp_execute()
354 rx_ring->qid + rx_ring->adapter->num_io_queues, in ena_xdp_execute()
357 xdp_stat = &rx_ring->rx_stats.xdp_tx; in ena_xdp_execute()
359 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
360 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
362 xdp_stat = &rx_ring->rx_stats.xdp_drop; in ena_xdp_execute()
364 xdp_stat = &rx_ring->rx_stats.xdp_pass; in ena_xdp_execute()
367 xdp_stat = &rx_ring->rx_stats.xdp_invalid; in ena_xdp_execute()
370 u64_stats_update_begin(&rx_ring->syncp); in ena_xdp_execute()
372 u64_stats_update_end(&rx_ring->syncp); in ena_xdp_execute()
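
The ena_xdp_execute() references above show the usual XDP run-and-dispatch shape: the attached program is read with READ_ONCE(), its verdict selects which per-ring counter to bump, and the increment is bracketed by u64_stats_update_begin()/end(). A minimal sketch of that shape follows; the bpf_prog_run_xdp() call, the RCU bracketing, and the exact counter increments are assumptions based on the standard XDP pattern, not copied from the listing.

/* Sketch only: mirrors the verdict/stat dispatch visible in the listing.
 * bpf_prog_run_xdp() and the counter increments are assumed, not quoted.
 */
static int xdp_execute_sketch(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct bpf_prog *xdp_prog;
	u64 *xdp_stat;
	u32 verdict = XDP_PASS;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		break;
	default:
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
	}

	/* Counters are 64-bit even on 32-bit kernels, so go through the syncp. */
	u64_stats_update_begin(&rx_ring->syncp);
	(*xdp_stat)++;
	u64_stats_update_end(&rx_ring->syncp);
out:
	rcu_read_unlock();
	return verdict;
}
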
415 static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring) in ena_xdp_register_rxq_info() argument
419 rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid); in ena_xdp_register_rxq_info()
422 netif_err(rx_ring->adapter, ifup, rx_ring->netdev, in ena_xdp_register_rxq_info()
424 rx_ring->qid, rc); in ena_xdp_register_rxq_info()
428 rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, in ena_xdp_register_rxq_info()
432 netif_err(rx_ring->adapter, ifup, rx_ring->netdev, in ena_xdp_register_rxq_info()
434 rx_ring->qid, rc); in ena_xdp_register_rxq_info()
435 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ena_xdp_register_rxq_info()
442 static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring) in ena_xdp_unregister_rxq_info() argument
444 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq); in ena_xdp_unregister_rxq_info()
445 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ena_xdp_unregister_rxq_info()
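
The register/unregister references above follow the two-step xdp_rxq_info lifecycle: register the queue, then its memory model, and roll back the first step if the second fails. A sketch under the field names shown in the listing:

/* Sketch of the rxq_info lifecycle shown above. MEM_TYPE_PAGE_SHARED is
 * taken from the listing and matches plain get_page()/put_page() recycling.
 */
static int xdp_register_rxq_sketch(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);
	if (rc)
		return rc;

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
					MEM_TYPE_PAGE_SHARED, NULL);
	if (rc)
		/* Undo the first registration so teardown stays symmetric. */
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	return rc;
}
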
452 struct ena_ring *rx_ring; in ena_xdp_exchange_program_rx_in_range() local
456 rx_ring = &adapter->rx_ring[i]; in ena_xdp_exchange_program_rx_in_range()
457 xchg(&rx_ring->xdp_bpf_prog, prog); in ena_xdp_exchange_program_rx_in_range()
459 ena_xdp_register_rxq_info(rx_ring); in ena_xdp_exchange_program_rx_in_range()
460 rx_ring->rx_headroom = XDP_PACKET_HEADROOM; in ena_xdp_exchange_program_rx_in_range()
462 ena_xdp_unregister_rxq_info(rx_ring); in ena_xdp_exchange_program_rx_in_range()
463 rx_ring->rx_headroom = 0; in ena_xdp_exchange_program_rx_in_range()
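
The exchange references above swap the per-queue program pointer atomically with xchg() and toggle the ring's headroom along with the rxq_info registration. A sketch, assuming illustrative range arguments that are not part of the listing:

/* Sketch of the per-queue program exchange shown above. The first/count
 * parameters are assumptions for illustration.
 */
static void xdp_exchange_sketch(struct ena_adapter *adapter,
				struct bpf_prog *prog, int first, int count)
{
	struct ena_ring *rx_ring;
	int i;

	for (i = first; i < first + count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}
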
628 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
801 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources() local
805 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
814 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
817 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
818 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
819 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
820 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
824 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
825 rx_ring->free_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
826 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
827 rx_ring->free_ids = vzalloc(size); in ena_setup_rx_resources()
828 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
829 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
830 rx_ring->rx_buffer_info = NULL; in ena_setup_rx_resources()
836 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
837 rx_ring->free_ids[i] = i; in ena_setup_rx_resources()
840 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
842 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
843 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
844 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
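
The ena_setup_rx_resources() references above use a NUMA-aware allocation with a fallback: try vzalloc_node() on the IRQ's node, fall back to vzalloc() on any node, and fail only if both allocations fail. A sketch of just that allocation step, with sizes as shown in the listing (ring_size + 1 buffer slots, ring_size free-id entries):

/* Sketch of the vzalloc_node()-with-fallback pattern shown above. */
static int setup_rx_arrays_sketch(struct ena_ring *rx_ring, int node)
{
	size_t size;

	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			/* Unwind the first allocation on failure. */
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	return 0;
}
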
858 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources() local
860 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
861 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
863 vfree(rx_ring->free_ids); in ena_free_rx_resources()
864 rx_ring->free_ids = NULL; in ena_free_rx_resources()
908 static int ena_alloc_rx_page(struct ena_ring *rx_ring, in ena_alloc_rx_page() argument
911 int headroom = rx_ring->rx_headroom; in ena_alloc_rx_page()
925 u64_stats_update_begin(&rx_ring->syncp); in ena_alloc_rx_page()
926 rx_ring->rx_stats.page_alloc_fail++; in ena_alloc_rx_page()
927 u64_stats_update_end(&rx_ring->syncp); in ena_alloc_rx_page()
934 dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_rx_page()
936 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { in ena_alloc_rx_page()
937 u64_stats_update_begin(&rx_ring->syncp); in ena_alloc_rx_page()
938 rx_ring->rx_stats.dma_mapping_err++; in ena_alloc_rx_page()
939 u64_stats_update_end(&rx_ring->syncp); in ena_alloc_rx_page()
944 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_page()
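
The ena_alloc_rx_page() references above allocate a page, DMA-map the full ENA_PAGE_SIZE for device writes, and count either failure in the ring's stats under the syncp. A sketch of that path; the GFP flags and the rx_info->page / ena_buf.len bookkeeping are assumptions standing in for the driver's exact fields:

/* Sketch of the buffer-fill path shown above. */
static int alloc_rx_page_sketch(struct ena_ring *rx_ring,
				struct ena_rx_buffer *rx_info)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(GFP_ATOMIC);	/* flags are an assumption */
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);
		__free_page(page);
		return -EIO;
	}

	/* Device writes land after the configured headroom (see the
	 * matching "paddr - rx_headroom" unmap in ena_free_rx_page above).
	 */
	rx_info->page = page;			/* field name assumed */
	rx_info->ena_buf.paddr = dma + rx_ring->rx_headroom;
	rx_info->ena_buf.len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
	return 0;
}
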
955 static void ena_free_rx_page(struct ena_ring *rx_ring, in ena_free_rx_page() argument
962 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
967 dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, in ena_free_rx_page()
975 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
981 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
986 req_id = rx_ring->free_ids[next_to_use]; in ena_refill_rx_bufs()
988 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
990 rc = ena_alloc_rx_page(rx_ring, rx_info, in ena_refill_rx_bufs()
993 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
995 rx_ring->qid); in ena_refill_rx_bufs()
998 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
1002 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
1004 rx_ring->qid); in ena_refill_rx_bufs()
1008 rx_ring->ring_size); in ena_refill_rx_bufs()
1012 u64_stats_update_begin(&rx_ring->syncp); in ena_refill_rx_bufs()
1013 rx_ring->rx_stats.refil_partial++; in ena_refill_rx_bufs()
1014 u64_stats_update_end(&rx_ring->syncp); in ena_refill_rx_bufs()
1015 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
1017 rx_ring->qid, i, num); in ena_refill_rx_bufs()
1022 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
1024 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
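
The ena_refill_rx_bufs() references above form a post-then-doorbell loop: pull the next free req_id, allocate a page for that slot, hand the descriptor to ena_com, advance next_to_use only for descriptors actually posted, and ring the doorbell once at the end. A sketch of that loop; the ena_alloc_rx_page() extra argument and the ENA_RX_RING_IDX_NEXT wrap macro are taken on trust from the driver and should be treated as assumptions here:

/* Sketch of the refill loop and doorbell shown above. */
static int refill_rx_bufs_sketch(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use = rx_ring->next_to_use;
	u32 i;
	int rc;

	for (i = 0; i < num; i++) {
		u16 req_id = rx_ring->free_ids[next_to_use];
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info, GFP_ATOMIC);
		if (unlikely(rc < 0))
			break;

		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf, req_id);
		if (unlikely(rc))
			break;

		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (likely(i))
		/* Make the new descriptors visible to the device. */
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;
	return i;
}
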
1032 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs() local
1035 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
1036 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
1039 ena_free_rx_page(rx_ring, rx_info); in ena_free_rx_bufs()
1048 struct ena_ring *rx_ring; in ena_refill_all_rx_bufs() local
1052 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
1053 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
1054 rc = ena_refill_rx_bufs(rx_ring, bufs_num); in ena_refill_all_rx_bufs()
1057 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
1313 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags) in ena_alloc_skb() argument
1318 skb = napi_get_frags(rx_ring->napi); in ena_alloc_skb()
1320 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in ena_alloc_skb()
1321 rx_ring->rx_copybreak); in ena_alloc_skb()
1324 u64_stats_update_begin(&rx_ring->syncp); in ena_alloc_skb()
1325 rx_ring->rx_stats.skb_alloc_fail++; in ena_alloc_skb()
1326 u64_stats_update_end(&rx_ring->syncp); in ena_alloc_skb()
1327 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
1335 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, in ena_rx_skb() argument
1348 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1351 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
1356 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1365 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
1366 skb = ena_alloc_skb(rx_ring, false); in ena_rx_skb()
1370 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1375 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1380 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
1386 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1387 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1389 rx_ring->ring_size); in ena_rx_skb()
1393 skb = ena_alloc_skb(rx_ring, true); in ena_rx_skb()
1398 dma_unmap_page(rx_ring->dev, in ena_rx_skb()
1405 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1411 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1414 rx_ring->ring_size); in ena_rx_skb()
1422 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
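
The ena_alloc_skb()/ena_rx_skb() references above encode the rx_copybreak optimization: frames at or below rx_copybreak are copied into a small linear skb so the DMA page can be handed straight back to the ring, while larger frames become page fragments on an skb from napi_get_frags(). A sketch of the small-frame branch; rx_info->page and the use of the headroom offset are assumptions standing in for the driver's buffer bookkeeping:

/* Sketch of the copybreak branch: sync for CPU, copy, sync back, recycle. */
static struct sk_buff *rx_copybreak_sketch(struct ena_ring *rx_ring,
					   struct ena_rx_buffer *rx_info,
					   u32 len)
{
	struct sk_buff *skb;
	void *va;

	skb = netdev_alloc_skb_ip_align(rx_ring->netdev, rx_ring->rx_copybreak);
	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return NULL;
	}

	dma_sync_single_for_cpu(rx_ring->dev, rx_info->ena_buf.paddr, len,
				DMA_FROM_DEVICE);
	va = page_address(rx_info->page) + rx_ring->rx_headroom;
	skb_put_data(skb, va, len);
	dma_sync_single_for_device(rx_ring->dev, rx_info->ena_buf.paddr, len,
				   DMA_FROM_DEVICE);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
	return skb;
}
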
1433 static void ena_rx_checksum(struct ena_ring *rx_ring, in ena_rx_checksum() argument
1438 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
1454 u64_stats_update_begin(&rx_ring->syncp); in ena_rx_checksum()
1455 rx_ring->rx_stats.bad_csum++; in ena_rx_checksum()
1456 u64_stats_update_end(&rx_ring->syncp); in ena_rx_checksum()
1457 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1467 u64_stats_update_begin(&rx_ring->syncp); in ena_rx_checksum()
1468 rx_ring->rx_stats.bad_csum++; in ena_rx_checksum()
1469 u64_stats_update_end(&rx_ring->syncp); in ena_rx_checksum()
1470 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1478 u64_stats_update_begin(&rx_ring->syncp); in ena_rx_checksum()
1479 rx_ring->rx_stats.csum_good++; in ena_rx_checksum()
1480 u64_stats_update_end(&rx_ring->syncp); in ena_rx_checksum()
1482 u64_stats_update_begin(&rx_ring->syncp); in ena_rx_checksum()
1483 rx_ring->rx_stats.csum_unchecked++; in ena_rx_checksum()
1484 u64_stats_update_end(&rx_ring->syncp); in ena_rx_checksum()
1494 static void ena_set_rx_hash(struct ena_ring *rx_ring, in ena_set_rx_hash() argument
1500 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1516 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) in ena_xdp_handle_buff() argument
1521 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_xdp_handle_buff()
1525 xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len; in ena_xdp_handle_buff()
1529 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) in ena_xdp_handle_buff()
1532 ret = ena_xdp_execute(rx_ring, xdp, rx_info); in ena_xdp_handle_buff()
1537 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; in ena_xdp_handle_buff()
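
The ena_xdp_handle_buff() references above set up a single-buffer xdp_buff from the first descriptor and run the program on it; data_end is recomputed afterwards in case the program shrank or grew the frame. A sketch of the setup; how oversized frames are handled is not visible in the listing, and the page-based virtual-address calculation is an assumption:

/* Sketch of the single-buffer xdp_buff setup implied above. */
static int xdp_handle_buff_sketch(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info =
		&rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	u32 len = rx_ring->ena_bufs[0].len;

	/* Frames too large for one page cannot be handed to the program. */
	if (unlikely(len > ENA_XDP_MAX_MTU))
		return XDP_DROP;	/* assumption; not shown in the listing */

	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data = xdp->data_hard_start + rx_ring->rx_headroom;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + len;

	return ena_xdp_execute(rx_ring, xdp, rx_info);
}
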
1549 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, in ena_clean_rx_irq() argument
1552 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1567 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1568 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1570 xdp.rxq = &rx_ring->xdp_rxq; in ena_clean_rx_irq()
1576 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1577 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1580 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1581 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1590 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_clean_rx_irq()
1593 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1595 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1598 if (ena_xdp_present_ring(rx_ring)) in ena_clean_rx_irq()
1599 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); in ena_clean_rx_irq()
1603 skb = ena_rx_skb(rx_ring, in ena_clean_rx_irq()
1604 rx_ring->ena_bufs, in ena_clean_rx_irq()
1615 ena_free_rx_page(rx_ring, in ena_clean_rx_irq()
1616 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]); in ena_clean_rx_irq()
1618 rx_ring->free_ids[next_to_clean] = in ena_clean_rx_irq()
1619 rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1622 rx_ring->ring_size); in ena_clean_rx_irq()
1631 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1633 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1635 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1637 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { in ena_clean_rx_irq()
1638 total_len += rx_ring->ena_bufs[0].len; in ena_clean_rx_irq()
1650 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1651 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1652 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1653 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1654 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1655 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1657 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1659 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1661 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, in ena_clean_rx_irq()
1666 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); in ena_clean_rx_irq()
1667 ena_refill_rx_bufs(rx_ring, refill_required); in ena_clean_rx_irq()
1673 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1676 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1677 rx_ring->rx_stats.bad_desc_num++; in ena_clean_rx_irq()
1678 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1681 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1682 rx_ring->rx_stats.bad_req_id++; in ena_clean_rx_irq()
1683 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1699 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; in ena_dim_work()
1706 struct ena_ring *rx_ring = ena_napi->rx_ring; in ena_adjust_adaptive_rx_intr_moderation() local
1708 if (!rx_ring->per_napi_packets) in ena_adjust_adaptive_rx_intr_moderation()
1711 rx_ring->non_empty_napi_events++; in ena_adjust_adaptive_rx_intr_moderation()
1713 dim_update_sample(rx_ring->non_empty_napi_events, in ena_adjust_adaptive_rx_intr_moderation()
1714 rx_ring->rx_stats.cnt, in ena_adjust_adaptive_rx_intr_moderation()
1715 rx_ring->rx_stats.bytes, in ena_adjust_adaptive_rx_intr_moderation()
1720 rx_ring->per_napi_packets = 0; in ena_adjust_adaptive_rx_intr_moderation()
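
The adaptive-moderation references above feed the ring's cumulative packet/byte counters into a dim_sample once per non-empty NAPI cycle; the dim worker later writes the chosen profile into smoothed_interval (see ena_dim_work above). A sketch, assuming the ena_napi structure carries a struct dim (not shown in the listing):

/* Sketch of the net_dim() hand-off shown above. */
static void adjust_rx_moderation_sketch(struct ena_napi *ena_napi)
{
	struct ena_ring *rx_ring = ena_napi->rx_ring;
	struct dim_sample dim_sample;

	if (!rx_ring->per_napi_packets)
		return;		/* nothing processed; keep the current profile */

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);	/* dim field name assumed */

	rx_ring->per_napi_packets = 0;
}
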
1724 struct ena_ring *rx_ring) in ena_unmask_interrupt() argument
1731 if (rx_ring) in ena_unmask_interrupt()
1732 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? in ena_unmask_interrupt()
1733 rx_ring->smoothed_interval : in ena_unmask_interrupt()
1734 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); in ena_unmask_interrupt()
1757 struct ena_ring *rx_ring) in ena_update_ring_numa_node() argument
1771 if (rx_ring) in ena_update_ring_numa_node()
1772 ena_com_update_numa_node(rx_ring->ena_com_io_cq, in ena_update_ring_numa_node()
1777 if (rx_ring) in ena_update_ring_numa_node()
1778 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1846 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1854 rx_ring = ena_napi->rx_ring; in ena_io_poll()
1857 rx_ring->first_interrupt = ena_napi->first_interrupt; in ena_io_poll()
1872 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); in ena_io_poll()
1895 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
1898 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
1901 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
2179 napi->rx_ring = &adapter->rx_ring[i]; in ena_init_napi_in_range()
2336 struct ena_ring *rx_ring; in ena_create_io_rx_queue() local
2343 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
2353 ctx.queue_size = rx_ring->ring_size; in ena_create_io_rx_queue()
2354 ctx.numa_node = cpu_to_node(rx_ring->cpu); in ena_create_io_rx_queue()
2365 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
2366 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
2374 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
2413 adapter->rx_ring[i].ring_size = new_rx_size; in set_io_rings_size()
2487 cur_rx_ring_size = adapter->rx_ring[0].ring_size; in create_queues_with_size_backoff()
2562 &adapter->rx_ring[i]); in ena_up()
3176 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
3199 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
3202 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); in ena_get_stats64()
3203 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
3204 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
3205 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); in ena_get_stats64()
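
The ena_get_stats64() references above read each ring's counters under a u64_stats retry loop, so a concurrent writer on another CPU cannot hand back a torn 64-bit value on 32-bit kernels. A sketch of the per-ring read, using the same _irq fetch helpers as the listing:

/* Sketch of the per-ring stats snapshot taken in ndo_get_stats64 above. */
static void read_rx_ring_stats_sketch(struct ena_ring *rx_ring,
				      u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
		*packets = rx_ring->rx_stats.cnt;
		*bytes = rx_ring->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
}
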
3598 struct ena_ring *rx_ring) in check_for_rx_interrupt_queue() argument
3600 if (likely(rx_ring->first_interrupt)) in check_for_rx_interrupt_queue()
3603 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
3606 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
3608 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
3611 rx_ring->qid); in check_for_rx_interrupt_queue()
3684 struct ena_ring *rx_ring; in check_for_missing_completions() local
3705 rx_ring = &adapter->rx_ring[i]; in check_for_missing_completions()
3712 check_for_rx_interrupt_queue(adapter, rx_ring) : 0; in check_for_missing_completions()
3742 struct ena_ring *rx_ring; in check_for_empty_rx_ring() local
3752 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
3754 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
3755 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
3756 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
3758 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
3759 u64_stats_update_begin(&rx_ring->syncp); in check_for_empty_rx_ring()
3760 rx_ring->rx_stats.empty_rx_ring++; in check_for_empty_rx_ring()
3761 u64_stats_update_end(&rx_ring->syncp); in check_for_empty_rx_ring()
3766 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
3767 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
3770 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()