Lines Matching refs:rx_ring

72 				 struct ena_ring *rx_ring);
74 				 struct ena_ring *rx_ring);
117 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
353 if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) in ena_xdp_xmit()
379 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) in ena_xdp_execute() argument
387 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); in ena_xdp_execute()
398 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
399 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
405 xdp_ring = rx_ring->xdp_ring; in ena_xdp_execute()
410 if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, in ena_xdp_execute()
415 xdp_stat = &rx_ring->rx_stats.xdp_tx; in ena_xdp_execute()
418 if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { in ena_xdp_execute()
419 xdp_stat = &rx_ring->rx_stats.xdp_redirect; in ena_xdp_execute()
422 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
423 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
427 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
428 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
431 xdp_stat = &rx_ring->rx_stats.xdp_drop; in ena_xdp_execute()
434 xdp_stat = &rx_ring->rx_stats.xdp_pass; in ena_xdp_execute()
438 xdp_stat = &rx_ring->rx_stats.xdp_invalid; in ena_xdp_execute()
441 ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); in ena_xdp_execute()
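The references above trace the per-verdict statistics bookkeeping in ena_xdp_execute(). A minimal sketch of that verdict dispatch, reconstructed from the listed fragments; the rx_stats field names come from the listing, while the XDP_TX transmit details and the exact error handling are elided and assumed:

static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	u64 *xdp_stat;
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;

	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
	if (!xdp_prog)
		return verdict;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		/* transmit via the paired XDP TX ring (details elided here) */
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			break;
		}
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		break;
	default:
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
	return verdict;
}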
482 static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring) in ena_xdp_register_rxq_info() argument
486 rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0); in ena_xdp_register_rxq_info()
489 netif_err(rx_ring->adapter, ifup, rx_ring->netdev, in ena_xdp_register_rxq_info()
491 rx_ring->qid, rc); in ena_xdp_register_rxq_info()
495 rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, in ena_xdp_register_rxq_info()
499 netif_err(rx_ring->adapter, ifup, rx_ring->netdev, in ena_xdp_register_rxq_info()
501 rx_ring->qid, rc); in ena_xdp_register_rxq_info()
502 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ena_xdp_register_rxq_info()
509 static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring) in ena_xdp_unregister_rxq_info() argument
511 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq); in ena_xdp_unregister_rxq_info()
512 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ena_xdp_unregister_rxq_info()
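As the register/unregister pair above shows, each RX ring exposes an xdp_rxq_info registered with the shared-page memory model. A condensed sketch with the netif_err() reporting trimmed (the napi_id of 0 and MEM_TYPE_PAGE_SHARED come straight from the listing):

static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	/* register the ring with the XDP core */
	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
	if (rc)
		return rc;

	/* RX buffers are ordinary driver-owned pages */
	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);
	if (rc)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}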
519 struct ena_ring *rx_ring; in ena_xdp_exchange_program_rx_in_range() local
523 rx_ring = &adapter->rx_ring[i]; in ena_xdp_exchange_program_rx_in_range()
524 xchg(&rx_ring->xdp_bpf_prog, prog); in ena_xdp_exchange_program_rx_in_range()
526 ena_xdp_register_rxq_info(rx_ring); in ena_xdp_exchange_program_rx_in_range()
527 rx_ring->rx_headroom = XDP_PACKET_HEADROOM; in ena_xdp_exchange_program_rx_in_range()
529 ena_xdp_unregister_rxq_info(rx_ring); in ena_xdp_exchange_program_rx_in_range()
530 rx_ring->rx_headroom = NET_SKB_PAD; in ena_xdp_exchange_program_rx_in_range()
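The exchange helper swaps the BPF program pointer per ring with xchg() and adjusts the reserved headroom: with a program attached, buffers need XDP_PACKET_HEADROOM in front of the data, otherwise the default NET_SKB_PAD is used. A sketch under those assumptions (the exact parameter list and range semantics are not visible in the listing):

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		/* xchg() publishes the new program to datapath readers */
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = NET_SKB_PAD;
		}
	}
}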
694 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
870 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources() local
874 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
883 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
886 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
887 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
888 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
889 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
893 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
894 rx_ring->free_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
895 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
896 rx_ring->free_ids = vzalloc(size); in ena_setup_rx_resources()
897 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
898 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
899 rx_ring->rx_buffer_info = NULL; in ena_setup_rx_resources()
905 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
906 rx_ring->free_ids[i] = i; in ena_setup_rx_resources()
909 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
911 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
912 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
913 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
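The setup path tries a NUMA-local allocation first and falls back to an unconstrained one, for both the buffer-info array and the free-id list. A trimmed sketch of that fallback pattern; the surrounding node/irq plumbing and return codes are assumptions:

	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);

	/* prefer memory on the ring's NUMA node, fall back to any node */
	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* every request id starts out free */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;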
927 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources() local
929 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
930 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
932 vfree(rx_ring->free_ids); in ena_free_rx_resources()
933 rx_ring->free_ids = NULL; in ena_free_rx_resources()
977 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, in ena_alloc_map_page() argument
987 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, in ena_alloc_map_page()
988 &rx_ring->syncp); in ena_alloc_map_page()
995 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_map_page()
997 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { in ena_alloc_map_page()
998 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, in ena_alloc_map_page()
999 &rx_ring->syncp); in ena_alloc_map_page()
1007 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring, in ena_alloc_rx_buffer() argument
1010 int headroom = rx_ring->rx_headroom; in ena_alloc_rx_buffer()
1024 page = ena_alloc_map_page(rx_ring, &dma); in ena_alloc_rx_buffer()
1028 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_buffer()
1041 static void ena_unmap_rx_buff(struct ena_ring *rx_ring, in ena_unmap_rx_buff() argument
1046 dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, in ena_unmap_rx_buff()
1051 static void ena_free_rx_page(struct ena_ring *rx_ring, in ena_free_rx_page() argument
1057 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
1062 ena_unmap_rx_buff(rx_ring, rx_info); in ena_free_rx_page()
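ena_alloc_map_page() pairs the page allocation with its DMA mapping and bumps the matching failure counter on each path; ena_unmap_rx_buff() later reverses the mapping. A sketch of the allocation side; the dev_alloc_page() call, the DMA direction, and the ERR_PTR error convention are assumptions, while the stat fields and ENA_PAGE_SIZE come from the listing:

static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, dma_addr_t *dma)
{
	struct page *page;

	page = dev_alloc_page();	/* assumption: plain full-page allocation */
	if (unlikely(!page)) {
		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
				  &rx_ring->syncp);
		return ERR_PTR(-ENOMEM);
	}

	/* map the whole page; the headroom offset is applied by the caller */
	*dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
				  &rx_ring->syncp);
		__free_page(page);
		return ERR_PTR(-EIO);
	}

	return page;
}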
1068 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
1074 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
1079 req_id = rx_ring->free_ids[next_to_use]; in ena_refill_rx_bufs()
1081 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
1083 rc = ena_alloc_rx_buffer(rx_ring, rx_info); in ena_refill_rx_bufs()
1085 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
1087 rx_ring->qid); in ena_refill_rx_bufs()
1090 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
1094 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
1096 rx_ring->qid); in ena_refill_rx_bufs()
1100 rx_ring->ring_size); in ena_refill_rx_bufs()
1104 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, in ena_refill_rx_bufs()
1105 &rx_ring->syncp); in ena_refill_rx_bufs()
1106 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
1108 rx_ring->qid, i, num); in ena_refill_rx_bufs()
1113 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
1115 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
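The refill loop pulls free request ids, allocates and maps a buffer for each, posts a descriptor to the submission queue, and rings the doorbell once for the whole batch; a partial refill only bumps refil_partial rather than failing. A compressed sketch; ENA_RX_RING_IDX_NEXT, the rx_info->ena_buf member, and the return convention are assumptions taken from the driver's headers rather than the listing:

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use = rx_ring->next_to_use;
	u32 i;
	int rc;

	for (i = 0; i < num; i++) {
		u16 req_id = rx_ring->free_ids[next_to_use];
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_buffer(rx_ring, rx_info);
		if (unlikely(rc < 0))
			break;

		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf, req_id);
		if (unlikely(rc))
			break;

		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, rx_ring->ring_size);
	}

	if (unlikely(i < num))
		ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
				  &rx_ring->syncp);

	if (likely(i)) {
		/* one doorbell for the whole batch */
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
		rx_ring->next_to_use = next_to_use;
	}

	return i;
}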
1123 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs() local
1126 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
1127 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
1130 ena_free_rx_page(rx_ring, rx_info); in ena_free_rx_bufs()
1139 struct ena_ring *rx_ring; in ena_refill_all_rx_bufs() local
1143 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
1144 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
1145 rc = ena_refill_rx_bufs(rx_ring, bufs_num); in ena_refill_all_rx_bufs()
1148 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
1401 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag) in ena_alloc_skb() argument
1406 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in ena_alloc_skb()
1407 rx_ring->rx_copybreak); in ena_alloc_skb()
1412 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, in ena_alloc_skb()
1413 &rx_ring->syncp); in ena_alloc_skb()
1415 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
1424 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, in ena_rx_skb() argument
1439 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1442 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
1447 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1458 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
1459 skb = ena_alloc_skb(rx_ring, NULL); in ena_rx_skb()
1463 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1468 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1473 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
1479 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1480 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1482 rx_ring->ring_size); in ena_rx_skb()
1486 ena_unmap_rx_buff(rx_ring, rx_info); in ena_rx_skb()
1488 skb = ena_alloc_skb(rx_ring, page_addr); in ena_rx_skb()
1495 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1498 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1504 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1507 rx_ring->ring_size); in ena_rx_skb()
1515 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1517 ena_unmap_rx_buff(rx_ring, rx_info); in ena_rx_skb()
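ena_rx_skb() takes two paths: packets at or below rx_copybreak are copied into a small freshly allocated skb so the page stays on the ring, while larger packets unmap the page and attach it to the skb as a fragment. A simplified sketch of the copybreak branch only; len, req_id, next_to_clean, skb and the stand-in names pre_dma_addr/buf_addr (the buffer's DMA and virtual addresses) belong to the enclosing function and are assumptions here:

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, NULL);
		if (unlikely(!skb))
			return NULL;

		/* make the device write visible to the CPU, copy, hand it back */
		dma_sync_single_for_cpu(rx_ring->dev, pre_dma_addr, len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, buf_addr, len);
		dma_sync_single_for_device(rx_ring->dev, pre_dma_addr, len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* the buffer is untouched, so its req_id goes straight back to
		 * the free list and the page is reused in place
		 */
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_NEXT(*next_to_clean,
						      rx_ring->ring_size);
		return skb;
	}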
1532 static void ena_rx_checksum(struct ena_ring *rx_ring, in ena_rx_checksum() argument
1537 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
1553 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, in ena_rx_checksum()
1554 &rx_ring->syncp); in ena_rx_checksum()
1555 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1565 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, in ena_rx_checksum()
1566 &rx_ring->syncp); in ena_rx_checksum()
1567 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1575 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, in ena_rx_checksum()
1576 &rx_ring->syncp); in ena_rx_checksum()
1578 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, in ena_rx_checksum()
1579 &rx_ring->syncp); in ena_rx_checksum()
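The checksum helper only marks CHECKSUM_UNNECESSARY when the device reports the L4 checksum as checked and valid; any reported error bumps bad_csum and the skb stays at CHECKSUM_NONE. A reduced sketch with the netif_dbg() messages dropped; the ena_rx_ctx field names are assumptions based on the driver:

static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* respect the administrative RX checksum setting first */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	if (unlikely(ena_rx_ctx->l3_csum_err || ena_rx_ctx->l4_csum_err)) {
		ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
				  &rx_ring->syncp);
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	if (likely(ena_rx_ctx->l4_csum_checked)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
				  &rx_ring->syncp);
	} else {
		ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
				  &rx_ring->syncp);
		skb->ip_summed = CHECKSUM_NONE;
	}
}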
1589 static void ena_set_rx_hash(struct ena_ring *rx_ring, in ena_set_rx_hash() argument
1595 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1611 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) in ena_xdp_handle_buff() argument
1616 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_xdp_handle_buff()
1619 rx_ring->ena_bufs[0].len, false); in ena_xdp_handle_buff()
1623 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) in ena_xdp_handle_buff()
1626 ret = ena_xdp_execute(rx_ring, xdp); in ena_xdp_handle_buff()
1631 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; in ena_xdp_handle_buff()
1643 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, in ena_clean_rx_irq() argument
1646 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1662 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1663 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1665 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); in ena_clean_rx_irq()
1670 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1671 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1674 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1675 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1684 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_clean_rx_irq()
1687 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1689 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1692 if (ena_xdp_present_ring(rx_ring)) in ena_clean_rx_irq()
1693 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); in ena_clean_rx_irq()
1697 skb = ena_rx_skb(rx_ring, in ena_clean_rx_irq()
1698 rx_ring->ena_bufs, in ena_clean_rx_irq()
1704 int req_id = rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1706 rx_ring->free_ids[next_to_clean] = req_id; in ena_clean_rx_irq()
1709 rx_ring->ring_size); in ena_clean_rx_irq()
1715 ena_unmap_rx_buff(rx_ring, in ena_clean_rx_irq()
1716 &rx_ring->rx_buffer_info[req_id]); in ena_clean_rx_irq()
1717 rx_ring->rx_buffer_info[req_id].page = NULL; in ena_clean_rx_irq()
1728 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1730 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1732 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1734 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) in ena_clean_rx_irq()
1745 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1746 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1747 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1748 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1749 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1750 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1752 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1754 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1756 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, in ena_clean_rx_irq()
1761 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); in ena_clean_rx_irq()
1762 ena_refill_rx_bufs(rx_ring, refill_required); in ena_clean_rx_irq()
1771 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1774 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, in ena_clean_rx_irq()
1775 &rx_ring->syncp); in ena_clean_rx_irq()
1778 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, in ena_clean_rx_irq()
1779 &rx_ring->syncp); in ena_clean_rx_irq()
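ena_clean_rx_irq() is the NAPI RX loop that ties the pieces above together: fetch a completed packet, run XDP if a program is attached, otherwise build an skb, update the ring counters under the u64_stats seqcount, and refill once free entries pass the threshold. A skeleton with the error and reset paths removed; the control flow around non-PASS XDP verdicts and the exact refill threshold are simplified assumptions:

static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	u32 work_done = 0, total_len = 0, refill_required;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	int rc;

	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;

		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq, &ena_rx_ctx);
		if (rc || ena_rx_ctx.descs == 0)
			break;

		/* XDP first; only build an skb when the packet passes */
		if (ena_xdp_present_ring(rx_ring) &&
		    ena_xdp_handle_buff(rx_ring, &xdp) != XDP_PASS)
			continue;	/* buffers consumed/recycled by the XDP path */

		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);
		if (unlikely(!skb))
			break;

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
		skb_record_rx_queue(skb, rx_ring->qid);

		total_len += skb->len;
		napi_gro_receive(napi, skb);
		work_done++;
	} while (work_done < budget);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	/* top the ring back up once enough descriptors have been consumed */
	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	if (refill_required > rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER)
		ena_refill_rx_bufs(rx_ring, refill_required);

	return work_done;
}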
1795 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; in ena_dim_work()
1802 struct ena_ring *rx_ring = ena_napi->rx_ring; in ena_adjust_adaptive_rx_intr_moderation() local
1804 if (!rx_ring->per_napi_packets) in ena_adjust_adaptive_rx_intr_moderation()
1807 rx_ring->non_empty_napi_events++; in ena_adjust_adaptive_rx_intr_moderation()
1809 dim_update_sample(rx_ring->non_empty_napi_events, in ena_adjust_adaptive_rx_intr_moderation()
1810 rx_ring->rx_stats.cnt, in ena_adjust_adaptive_rx_intr_moderation()
1811 rx_ring->rx_stats.bytes, in ena_adjust_adaptive_rx_intr_moderation()
1816 rx_ring->per_napi_packets = 0; in ena_adjust_adaptive_rx_intr_moderation()
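With adaptive moderation enabled, each non-empty NAPI round feeds a sample (event counter plus cumulative packet and byte totals) to the dim library, and ena_dim_work() later writes the chosen profile back into smoothed_interval. A sketch of the sampling side; the ena_napi->dim member and the by-value net_dim() signature of this kernel generation are assumptions:

static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct ena_ring *rx_ring = ena_napi->rx_ring;
	struct dim_sample dim_sample;

	if (!rx_ring->per_napi_packets)
		return;		/* idle round: keep the current moderation */

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	/* hand the sample to the dim core; it schedules ena_dim_work() as needed */
	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}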
1820 struct ena_ring *rx_ring) in ena_unmask_interrupt() argument
1827 if (rx_ring) in ena_unmask_interrupt()
1828 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? in ena_unmask_interrupt()
1829 rx_ring->smoothed_interval : in ena_unmask_interrupt()
1830 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); in ena_unmask_interrupt()
1852 struct ena_ring *rx_ring) in ena_update_ring_numa_node() argument
1866 if (rx_ring) in ena_update_ring_numa_node()
1867 ena_com_update_numa_node(rx_ring->ena_com_io_cq, in ena_update_ring_numa_node()
1872 if (rx_ring) in ena_update_ring_numa_node()
1873 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1941 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1949 rx_ring = ena_napi->rx_ring; in ena_io_poll()
1964 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); in ena_io_poll()
1987 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
1990 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
1993 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
2274 napi->rx_ring = &adapter->rx_ring[i]; in ena_init_napi_in_range()
2431 struct ena_ring *rx_ring; in ena_create_io_rx_queue() local
2438 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
2448 ctx.queue_size = rx_ring->ring_size; in ena_create_io_rx_queue()
2449 ctx.numa_node = cpu_to_node(rx_ring->cpu); in ena_create_io_rx_queue()
2460 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
2461 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
2469 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
2508 adapter->rx_ring[i].ring_size = new_rx_size; in set_io_rings_size()
2582 cur_rx_ring_size = adapter->rx_ring[0].ring_size; in create_queues_with_size_backoff()
2656 &adapter->rx_ring[i]); in ena_up()
3258 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
3281 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
3284 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); in ena_get_stats64()
3285 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
3286 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
3287 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); in ena_get_stats64()
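The stats64 path reads each ring's counters under the u64_stats seqcount so 64-bit values stay consistent on 32-bit hosts; the listing shows the _irq fetch variants in use. A sketch of the per-ring read, assuming the usual num_io_queues bound and the rtnl_link_stats64 *stats argument of ena_get_stats64():

	for (i = 0; i < adapter->num_io_queues; i++) {
		u64 packets, bytes;
		unsigned int start;

		rx_ring = &adapter->rx_ring[i];

		/* retry the read if an update raced with us */
		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}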
3681 struct ena_ring *rx_ring) in check_for_rx_interrupt_queue() argument
3683 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); in check_for_rx_interrupt_queue()
3688 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
3691 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
3693 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
3696 rx_ring->qid); in check_for_rx_interrupt_queue()
3779 struct ena_ring *rx_ring; in check_for_missing_completions() local
3800 rx_ring = &adapter->rx_ring[i]; in check_for_missing_completions()
3807 check_for_rx_interrupt_queue(adapter, rx_ring) : 0; in check_for_missing_completions()
3837 struct ena_ring *rx_ring; in check_for_empty_rx_ring() local
3847 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
3849 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
3850 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
3851 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
3853 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
3854 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, in check_for_empty_rx_ring()
3855 &rx_ring->syncp); in check_for_empty_rx_ring()
3860 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
3861 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
3864 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
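The keep-alive checks close the section: when a ring has drained completely (free entries equal ring_size - 1) for EMPTY_RX_REFILL consecutive passes, the watchdog bumps empty_rx_ring and kicks the ring's NAPI so the refill path runs again. A sketch of that check; the loop bound and fields not shown in the listing are assumptions:

static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	u32 refill_required;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			/* the ring is empty: every slot is waiting for a buffer */
			if (++rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
						  &rx_ring->syncp);
				/* let the NAPI poll routine refill the ring */
				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}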