Lines matching refs:rx_ring — cross-reference hits for rx_ring in the Amazon ENA Ethernet driver (Linux kernel). Each hit carries its line number in the source file and the enclosing function.
70 struct ena_ring *rx_ring);
72 struct ena_ring *rx_ring);
115 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
349 if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) in ena_xdp_xmit()
375 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) in ena_xdp_execute() argument
383 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); in ena_xdp_execute()
394 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
395 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
401 xdp_ring = rx_ring->xdp_ring; in ena_xdp_execute()
406 if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, in ena_xdp_execute()
411 xdp_stat = &rx_ring->rx_stats.xdp_tx; in ena_xdp_execute()
415 if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { in ena_xdp_execute()
416 xdp_stat = &rx_ring->rx_stats.xdp_redirect; in ena_xdp_execute()
420 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
421 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
425 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
426 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
430 xdp_stat = &rx_ring->rx_stats.xdp_drop; in ena_xdp_execute()
434 xdp_stat = &rx_ring->rx_stats.xdp_pass; in ena_xdp_execute()
438 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
439 xdp_stat = &rx_ring->rx_stats.xdp_invalid; in ena_xdp_execute()
443 ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); in ena_xdp_execute()
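
Lines 375-443 above are the statistics and trace arms of the XDP verdict switch in ena_xdp_execute(). A condensed sketch of the dispatch they imply; the ENA_XDP_* values are the driver's internal verdict codes, and the lock the driver takes around the shared XDP TX ring is reduced to a comment:

static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
    u32 verdict = ENA_XDP_PASS;
    struct bpf_prog *xdp_prog;
    struct ena_ring *xdp_ring;
    struct xdp_frame *xdpf;
    u64 *xdp_stat;

    xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
    verdict = bpf_prog_run_xdp(xdp_prog, xdp);

    switch (verdict) {
    case XDP_TX:
        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf)) {
            trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
            xdp_stat = &rx_ring->rx_stats.xdp_aborted;
            verdict = ENA_XDP_DROP;
            break;
        }
        /* transmit on the XDP TX ring paired with this RX queue;
         * the real driver serializes access to it with a lock
         */
        xdp_ring = rx_ring->xdp_ring;
        if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
                               XDP_XMIT_FLUSH))
            xdp_return_frame(xdpf);
        xdp_stat = &rx_ring->rx_stats.xdp_tx;
        verdict = ENA_XDP_TX;
        break;
    case XDP_REDIRECT:
        if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
            xdp_stat = &rx_ring->rx_stats.xdp_redirect;
            verdict = ENA_XDP_REDIRECT;
            break;
        }
        trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
        xdp_stat = &rx_ring->rx_stats.xdp_aborted;
        verdict = ENA_XDP_DROP;
        break;
    case XDP_ABORTED:
        trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
        xdp_stat = &rx_ring->rx_stats.xdp_aborted;
        verdict = ENA_XDP_DROP;
        break;
    case XDP_DROP:
        xdp_stat = &rx_ring->rx_stats.xdp_drop;
        verdict = ENA_XDP_DROP;
        break;
    case XDP_PASS:
        xdp_stat = &rx_ring->rx_stats.xdp_pass;
        break;
    default:
        bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
        xdp_stat = &rx_ring->rx_stats.xdp_invalid;
        verdict = ENA_XDP_DROP;
    }

    /* exactly one stat is bumped per packet, whatever the verdict */
    ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
    return verdict;
}
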
484 static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring) in ena_xdp_register_rxq_info() argument
488 rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0); in ena_xdp_register_rxq_info()
491 netif_err(rx_ring->adapter, ifup, rx_ring->netdev, in ena_xdp_register_rxq_info()
493 rx_ring->qid, rc); in ena_xdp_register_rxq_info()
497 rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, in ena_xdp_register_rxq_info()
501 netif_err(rx_ring->adapter, ifup, rx_ring->netdev, in ena_xdp_register_rxq_info()
503 rx_ring->qid, rc); in ena_xdp_register_rxq_info()
504 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ena_xdp_register_rxq_info()
511 static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring) in ena_xdp_unregister_rxq_info() argument
513 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq); in ena_xdp_unregister_rxq_info()
514 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ena_xdp_unregister_rxq_info()
522 struct ena_ring *rx_ring; in ena_xdp_exchange_program_rx_in_range() local
526 rx_ring = &adapter->rx_ring[i]; in ena_xdp_exchange_program_rx_in_range()
527 old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog); in ena_xdp_exchange_program_rx_in_range()
530 ena_xdp_register_rxq_info(rx_ring); in ena_xdp_exchange_program_rx_in_range()
531 rx_ring->rx_headroom = XDP_PACKET_HEADROOM; in ena_xdp_exchange_program_rx_in_range()
533 ena_xdp_unregister_rxq_info(rx_ring); in ena_xdp_exchange_program_rx_in_range()
534 rx_ring->rx_headroom = NET_SKB_PAD; in ena_xdp_exchange_program_rx_in_range()
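
Lines 522-534 above are ena_xdp_exchange_program_rx_in_range(). The program pointer is swapped in atomically with xchg(), and rxq_info registration plus RX headroom track whether a program is being installed or removed. A plausible reconstruction (the first/count parameter names are assumptions):

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
                                                 struct bpf_prog *prog,
                                                 int first, int count)
{
    struct bpf_prog *old_bpf_prog;
    struct ena_ring *rx_ring;
    int i;

    for (i = first; i < count; i++) {
        rx_ring = &adapter->rx_ring[i];
        /* atomic swap: readers use READ_ONCE(rx_ring->xdp_bpf_prog) */
        old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);

        if (!old_bpf_prog && prog) {
            /* first program on this ring: register rxq info and
             * reserve XDP headroom in front of each buffer
             */
            ena_xdp_register_rxq_info(rx_ring);
            rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
        } else if (old_bpf_prog && !prog) {
            ena_xdp_unregister_rxq_info(rx_ring);
            rx_ring->rx_headroom = NET_SKB_PAD;
        }
    }
}
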
701 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
878 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources() local
882 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
891 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
894 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
895 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
896 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
897 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
901 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
902 rx_ring->free_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
903 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
904 rx_ring->free_ids = vzalloc(size); in ena_setup_rx_resources()
905 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
906 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
907 rx_ring->rx_buffer_info = NULL; in ena_setup_rx_resources()
913 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
914 rx_ring->free_ids[i] = i; in ena_setup_rx_resources()
917 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
919 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
920 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
921 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
922 rx_ring->numa_node = node; in ena_setup_rx_resources()
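
Lines 878-922 are the allocation core of ena_setup_rx_resources(). They show the NUMA-aware fallback idiom: try vzalloc_node() on the IRQ's node first, retry with plain vzalloc() before giving up, and unwind the earlier allocation on the second failure. A sketch built from those fragments (the ena_irq lookup via the adapter's IRQ table is an assumption):

static int ena_setup_rx_resources(struct ena_adapter *adapter, u32 qid)
{
    struct ena_ring *rx_ring = &adapter->rx_ring[qid];
    struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
    int node = cpu_to_node(ena_irq->cpu);
    size_t size;
    int i;

    /* try the IRQ's NUMA node first, then fall back to any node */
    size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
    rx_ring->rx_buffer_info = vzalloc_node(size, node);
    if (!rx_ring->rx_buffer_info) {
        rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
            return -ENOMEM;
    }

    size = sizeof(u16) * rx_ring->ring_size;
    rx_ring->free_ids = vzalloc_node(size, node);
    if (!rx_ring->free_ids) {
        rx_ring->free_ids = vzalloc(size);
        if (!rx_ring->free_ids) {
            vfree(rx_ring->rx_buffer_info);
            rx_ring->rx_buffer_info = NULL;
            return -ENOMEM;
        }
    }

    /* free_ids starts out as the identity permutation of req_ids */
    for (i = 0; i < rx_ring->ring_size; i++)
        rx_ring->free_ids[i] = i;

    memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
    rx_ring->cpu = ena_irq->cpu;
    rx_ring->numa_node = node;
    return 0;
}
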
936 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources() local
938 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
939 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
941 vfree(rx_ring->free_ids); in ena_free_rx_resources()
942 rx_ring->free_ids = NULL; in ena_free_rx_resources()
986 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, in ena_alloc_map_page() argument
996 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, in ena_alloc_map_page()
997 &rx_ring->syncp); in ena_alloc_map_page()
1004 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_map_page()
1006 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { in ena_alloc_map_page()
1007 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, in ena_alloc_map_page()
1008 &rx_ring->syncp); in ena_alloc_map_page()
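
ena_alloc_map_page() pairs page allocation with DMA mapping and charges each failure mode to its own statistic, returning ERR_PTR() so callers can distinguish them. A sketch consistent with the fragments:

static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
                                       dma_addr_t *dma)
{
    struct page *page;

    /* allocates on the NUMA node of the executing CPU */
    page = dev_alloc_page();
    if (!page) {
        ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
                          &rx_ring->syncp);
        return ERR_PTR(-ENOSPC);
    }

    /* map the whole page; per-buffer offsets are applied by the caller */
    *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
                        DMA_BIDIRECTIONAL);
    if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
        ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
                          &rx_ring->syncp);
        __free_page(page);
        return ERR_PTR(-EIO);
    }

    return page;
}
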
1016 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring, in ena_alloc_rx_buffer() argument
1019 int headroom = rx_ring->rx_headroom; in ena_alloc_rx_buffer()
1033 page = ena_alloc_map_page(rx_ring, &dma); in ena_alloc_rx_buffer()
1037 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_buffer()
1052 static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring, in ena_unmap_rx_buff_attrs() argument
1056 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, in ena_unmap_rx_buff_attrs()
1060 static void ena_free_rx_page(struct ena_ring *rx_ring, in ena_free_rx_page() argument
1066 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
1071 ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0); in ena_free_rx_page()
1077 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
1083 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
1088 req_id = rx_ring->free_ids[next_to_use]; in ena_refill_rx_bufs()
1090 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
1092 rc = ena_alloc_rx_buffer(rx_ring, rx_info); in ena_refill_rx_bufs()
1094 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
1096 rx_ring->qid); in ena_refill_rx_bufs()
1099 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
1103 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
1105 rx_ring->qid); in ena_refill_rx_bufs()
1109 rx_ring->ring_size); in ena_refill_rx_bufs()
1113 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, in ena_refill_rx_bufs()
1114 &rx_ring->syncp); in ena_refill_rx_bufs()
1115 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
1117 rx_ring->qid, i, num); in ena_refill_rx_bufs()
1122 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
1124 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
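
ena_refill_rx_bufs() posts up to num buffers, stopping early if allocation or descriptor submission fails. A partial fill bumps refil_partial (the spelling is the driver's own field name), and the doorbell is written once at the end instead of per descriptor. A sketch with the warning printouts elided:

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
    u16 next_to_use, req_id;
    u32 i;
    int rc;

    next_to_use = rx_ring->next_to_use;

    for (i = 0; i < num; i++) {
        struct ena_rx_buffer *rx_info;

        /* free_ids hands out the next available req_id */
        req_id = rx_ring->free_ids[next_to_use];
        rx_info = &rx_ring->rx_buffer_info[req_id];

        rc = ena_alloc_rx_buffer(rx_ring, rx_info);
        if (unlikely(rc < 0))
            break;

        rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
                                        &rx_info->ena_buf, req_id);
        if (unlikely(rc))
            break;

        next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
                                           rx_ring->ring_size);
    }

    if (unlikely(i < num))
        ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
                          &rx_ring->syncp);

    /* single doorbell write makes all new descriptors visible at once */
    if (likely(i))
        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

    rx_ring->next_to_use = next_to_use;
    return i;
}
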
1132 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs() local
1135 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
1136 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
1139 ena_free_rx_page(rx_ring, rx_info); in ena_free_rx_bufs()
1148 struct ena_ring *rx_ring; in ena_refill_all_rx_bufs() local
1152 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
1153 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
1154 rc = ena_refill_rx_bufs(rx_ring, bufs_num); in ena_refill_all_rx_bufs()
1157 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
1409 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len) in ena_alloc_skb() argument
1414 skb = napi_alloc_skb(rx_ring->napi, len); in ena_alloc_skb()
1419 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, in ena_alloc_skb()
1420 &rx_ring->syncp); in ena_alloc_skb()
1422 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
1449 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, in ena_rx_skb() argument
1455 bool is_xdp_loaded = ena_xdp_present_ring(rx_ring); in ena_rx_skb()
1470 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1473 adapter = rx_ring->adapter; in ena_rx_skb()
1474 netif_err(adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
1475 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); in ena_rx_skb()
1476 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); in ena_rx_skb()
1481 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1486 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
1490 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
1491 skb = ena_alloc_skb(rx_ring, NULL, len); in ena_rx_skb()
1496 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1501 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
1507 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1509 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1510 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1512 rx_ring->ring_size); in ena_rx_skb()
1524 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1530 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); in ena_rx_skb()
1532 skb = ena_alloc_skb(rx_ring, buf_addr, buf_len); in ena_rx_skb()
1539 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1542 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1549 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1552 rx_ring->ring_size); in ena_rx_skb()
1560 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1564 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
1573 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1579 ena_unmap_rx_buff_attrs(rx_ring, rx_info, in ena_rx_skb()
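
Lines 1449-1579 are ena_rx_skb(). Its first branch is the rx_copybreak fast path: packets no longer than rx_copybreak are copied into a fresh skb so the page can stay mapped in the ring; larger packets instead unmap the page and hand it to the skb. A condensed sketch of that decision (len, req_id, rx_info, buf_addr, buf_offset, pkt_offset, and buf_len are locals of the surrounding function, derived from the descriptor):

    /* small packet: copy into the skb linear area, recycle the page */
    if (len <= rx_ring->rx_copybreak) {
        skb = ena_alloc_skb(rx_ring, NULL, len);
        if (unlikely(!skb))
            return NULL;

        dma_sync_single_for_cpu(rx_ring->dev,
                                rx_info->dma_addr + pkt_offset,
                                len, DMA_FROM_DEVICE);
        skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
        dma_sync_single_for_device(rx_ring->dev,
                                   rx_info->dma_addr + pkt_offset,
                                   len, DMA_FROM_DEVICE);

        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);

        /* the descriptor is immediately reusable: recycle its req_id */
        rx_ring->free_ids[*next_to_clean] = req_id;
        *next_to_clean = ENA_RX_RING_IDX_NEXT(*next_to_clean,
                                              rx_ring->ring_size);
        return skb;
    }

    /* large packet: unmap the page and build the skb around it */
    dma_sync_single_for_cpu(rx_ring->dev, rx_info->dma_addr + pkt_offset,
                            len, DMA_FROM_DEVICE);
    ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
    skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
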
1595 static void ena_rx_checksum(struct ena_ring *rx_ring, in ena_rx_checksum() argument
1600 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
1616 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1617 &rx_ring->syncp); in ena_rx_checksum()
1618 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1628 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1629 &rx_ring->syncp); in ena_rx_checksum()
1630 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1638 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, in ena_rx_checksum()
1639 &rx_ring->syncp); in ena_rx_checksum()
1641 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, in ena_rx_checksum()
1642 &rx_ring->syncp); in ena_rx_checksum()
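
ena_rx_checksum() translates the device's per-packet checksum indications into skb->ip_summed and counts good, bad, and unchecked outcomes. A condensed sketch; the real function also inspects the L4 protocol and fragmentation bits before trusting l4_csum_checked:

static void ena_rx_checksum(struct ena_ring *rx_ring,
                            struct ena_com_rx_ctx *ena_rx_ctx,
                            struct sk_buff *skb)
{
    /* honor the RXCSUM feature toggle */
    if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
        skb->ip_summed = CHECKSUM_NONE;
        return;
    }

    if (unlikely(ena_rx_ctx->l3_csum_err)) {
        /* bad L3 checksum: count it, let the stack drop the packet */
        ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
                          &rx_ring->syncp);
        skb->ip_summed = CHECKSUM_NONE;
        return;
    }

    if (likely(ena_rx_ctx->l4_csum_checked)) {
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
                          &rx_ring->syncp);
    } else {
        /* device did not verify L4: leave verification to the stack */
        ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
                          &rx_ring->syncp);
        skb->ip_summed = CHECKSUM_NONE;
    }
}
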
1652 static void ena_set_rx_hash(struct ena_ring *rx_ring, in ena_set_rx_hash() argument
1658 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1674 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) in ena_xdp_handle_buff() argument
1679 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_xdp_handle_buff()
1682 rx_ring->ena_bufs[0].len, false); in ena_xdp_handle_buff()
1686 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) in ena_xdp_handle_buff()
1689 ret = ena_xdp_execute(rx_ring, xdp); in ena_xdp_handle_buff()
1694 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; in ena_xdp_handle_buff()
1706 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, in ena_clean_rx_irq() argument
1709 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1725 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1726 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1728 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); in ena_clean_rx_irq()
1733 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1734 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1737 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1738 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1747 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_clean_rx_irq()
1750 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1752 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1755 if (ena_xdp_present_ring(rx_ring)) in ena_clean_rx_irq()
1756 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); in ena_clean_rx_irq()
1760 skb = ena_rx_skb(rx_ring, in ena_clean_rx_irq()
1761 rx_ring->ena_bufs, in ena_clean_rx_irq()
1767 int req_id = rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1769 rx_ring->free_ids[next_to_clean] = req_id; in ena_clean_rx_irq()
1772 rx_ring->ring_size); in ena_clean_rx_irq()
1778 ena_unmap_rx_buff_attrs(rx_ring, in ena_clean_rx_irq()
1779 &rx_ring->rx_buffer_info[req_id], in ena_clean_rx_irq()
1781 rx_ring->rx_buffer_info[req_id].page = NULL; in ena_clean_rx_irq()
1793 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1795 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1797 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1799 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) in ena_clean_rx_irq()
1810 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1811 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1812 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1813 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1814 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1815 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1817 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1819 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1821 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, in ena_clean_rx_irq()
1826 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); in ena_clean_rx_irq()
1827 ena_refill_rx_bufs(rx_ring, refill_required); in ena_clean_rx_irq()
1839 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1842 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, in ena_clean_rx_irq()
1843 &rx_ring->syncp); in ena_clean_rx_irq()
1846 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, in ena_clean_rx_irq()
1847 &rx_ring->syncp); in ena_clean_rx_irq()
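
Lines 1706-1847 are ena_clean_rx_irq(), the NAPI RX handler. A trimmed sketch of its shape: pull completed packets from the completion queue, give XDP first look, build an skb only on XDP_PASS, then publish stats and refill once enough descriptors have been consumed. Error handling, the reset path, and the bookkeeping for buffers consumed by XDP are elided:

static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                            u32 budget)
{
    u16 next_to_clean = rx_ring->next_to_clean;
    struct ena_com_rx_ctx ena_rx_ctx;
    u32 work_done = 0, total_len = 0;
    int xdp_verdict, refill_required;
    struct sk_buff *skb;
    struct xdp_buff xdp;
    int rc;

    xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);

    do {
        xdp_verdict = ENA_XDP_PASS;
        ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
        ena_rx_ctx.max_bufs = rx_ring->sgl_size;
        ena_rx_ctx.descs = 0;

        rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
                            rx_ring->ena_com_io_sq, &ena_rx_ctx);
        if (unlikely(rc || ena_rx_ctx.descs == 0))
            break;  /* error or no more completed packets */

        /* XDP sees the buffer first; skb path only on PASS */
        if (ena_xdp_present_ring(rx_ring))
            xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

        if (xdp_verdict == ENA_XDP_PASS) {
            skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs,
                             ena_rx_ctx.descs, &next_to_clean);
            if (skb) {
                ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
                ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
                skb_record_rx_queue(skb, rx_ring->qid);
                total_len += skb->len;
                napi_gro_receive(napi, skb);
            }
        }
        work_done++;
    } while (likely(work_done < budget));

    rx_ring->per_napi_packets += work_done;
    u64_stats_update_begin(&rx_ring->syncp);
    rx_ring->rx_stats.bytes += total_len;
    rx_ring->rx_stats.cnt += work_done;
    u64_stats_update_end(&rx_ring->syncp);

    rx_ring->next_to_clean = next_to_clean;

    /* refill only after a threshold of descriptors has drained */
    refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
    if (refill_required >
        min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
              ENA_RX_REFILL_THRESH_PACKET)) {
        ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
        ena_refill_rx_bufs(rx_ring, refill_required);
    }

    return work_done;
}
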
1860 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; in ena_dim_work()
1867 struct ena_ring *rx_ring = ena_napi->rx_ring; in ena_adjust_adaptive_rx_intr_moderation() local
1869 if (!rx_ring->per_napi_packets) in ena_adjust_adaptive_rx_intr_moderation()
1872 rx_ring->non_empty_napi_events++; in ena_adjust_adaptive_rx_intr_moderation()
1874 dim_update_sample(rx_ring->non_empty_napi_events, in ena_adjust_adaptive_rx_intr_moderation()
1875 rx_ring->rx_stats.cnt, in ena_adjust_adaptive_rx_intr_moderation()
1876 rx_ring->rx_stats.bytes, in ena_adjust_adaptive_rx_intr_moderation()
1881 rx_ring->per_napi_packets = 0; in ena_adjust_adaptive_rx_intr_moderation()
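
ena_dim_work() (line 1860) and ena_adjust_adaptive_rx_intr_moderation() are the two halves of DIM interrupt moderation: the NAPI path feeds packet/byte samples to net_dim, and the work item later writes the chosen profile into rx_ring->smoothed_interval. A sketch of the sampling half (net_dim() takes the sample by value in the kernel series these line numbers suggest; newer kernels take a pointer):

static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
    struct ena_ring *rx_ring = ena_napi->rx_ring;
    struct dim_sample dim_sample;

    /* nothing to learn from an empty NAPI round */
    if (!rx_ring->per_napi_packets)
        return;

    rx_ring->non_empty_napi_events++;

    /* feed cumulative counters to the dim library; it schedules
     * ena_dim_work() when it decides on a new moderation profile
     */
    dim_update_sample(rx_ring->non_empty_napi_events,
                      rx_ring->rx_stats.cnt,
                      rx_ring->rx_stats.bytes,
                      &dim_sample);

    net_dim(&ena_napi->dim, dim_sample);

    rx_ring->per_napi_packets = 0;
}
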
1885 struct ena_ring *rx_ring) in ena_unmask_interrupt() argument
1893 if (rx_ring) in ena_unmask_interrupt()
1894 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? in ena_unmask_interrupt()
1895 rx_ring->smoothed_interval : in ena_unmask_interrupt()
1896 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); in ena_unmask_interrupt()
1918 struct ena_ring *rx_ring) in ena_update_ring_numa_node() argument
1928 if (rx_ring) in ena_update_ring_numa_node()
1929 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1941 if (rx_ring) { in ena_update_ring_numa_node()
1942 rx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1943 ena_com_update_numa_node(rx_ring->ena_com_io_cq, in ena_update_ring_numa_node()
2017 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
2025 rx_ring = ena_napi->rx_ring; in ena_io_poll()
2040 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); in ena_io_poll()
2063 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
2066 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
2067 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
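
ena_io_poll() ties it together: one NAPI context services a TX/RX ring pair, and the interrupt is re-armed only after napi_complete_done() accepts the completion. A heavily simplified sketch; the real function also checks reset state, budgets TX work separately, and keeps per-queue stats:

static int ena_io_poll(struct napi_struct *napi, int budget)
{
    struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
    struct ena_ring *tx_ring = ena_napi->tx_ring;
    struct ena_ring *rx_ring = ena_napi->rx_ring;
    int rx_work_done;

    ena_clean_tx_irq(tx_ring, budget);
    rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

    if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
        /* re-arm the still-masked interrupt only once NAPI is done */
        if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
            ena_adjust_adaptive_rx_intr_moderation(ena_napi);

        ena_update_ring_numa_node(tx_ring, rx_ring);
        ena_unmask_interrupt(tx_ring, rx_ring);
    }

    return rx_work_done;
}
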
2347 napi->rx_ring = &adapter->rx_ring[i]; in ena_init_napi_in_range()
2504 struct ena_ring *rx_ring; in ena_create_io_rx_queue() local
2511 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
2521 ctx.queue_size = rx_ring->ring_size; in ena_create_io_rx_queue()
2522 ctx.numa_node = rx_ring->numa_node; in ena_create_io_rx_queue()
2533 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
2534 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
2542 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
2581 adapter->rx_ring[i].ring_size = new_rx_size; in set_io_rings_size()
2655 cur_rx_ring_size = adapter->rx_ring[0].ring_size; in create_queues_with_size_backoff()
2729 &adapter->rx_ring[i]); in ena_up()
2901 struct ena_ring *rx_ring; in ena_set_rx_copybreak() local
2910 rx_ring = &adapter->rx_ring[i]; in ena_set_rx_copybreak()
2911 rx_ring->rx_copybreak = rx_copybreak; in ena_set_rx_copybreak()
3366 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
3389 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
3392 start = u64_stats_fetch_begin(&rx_ring->syncp); in ena_get_stats64()
3393 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
3394 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
3395 } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); in ena_get_stats64()
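
ena_get_stats64() samples each ring's counters inside a u64_stats retry loop, so 64-bit statistics read consistently on 32-bit machines without locking the hot path. A sketch of the RX half (stats is the struct rtnl_link_stats64 output argument):

    for (i = 0; i < adapter->num_io_queues; i++) {
        u64 bytes, packets;
        unsigned int start;

        rx_ring = &adapter->rx_ring[i];

        /* retry if a writer raced with us between begin and retry */
        do {
            start = u64_stats_fetch_begin(&rx_ring->syncp);
            packets = rx_ring->rx_stats.cnt;
            bytes = rx_ring->rx_stats.bytes;
        } while (u64_stats_fetch_retry(&rx_ring->syncp, start));

        stats->rx_packets += packets;
        stats->rx_bytes += bytes;
    }
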
3454 if (adapter->rx_ring->ring_size) in ena_calc_io_queue_size()
3455 rx_queue_size = adapter->rx_ring->ring_size; in ena_calc_io_queue_size()
3909 struct ena_ring *rx_ring) in check_for_rx_interrupt_queue() argument
3911 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); in check_for_rx_interrupt_queue()
3916 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
3919 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
3921 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
3924 rx_ring->qid); in check_for_rx_interrupt_queue()
4002 struct ena_ring *rx_ring; in check_for_missing_completions() local
4023 rx_ring = &adapter->rx_ring[i]; in check_for_missing_completions()
4030 check_for_rx_interrupt_queue(adapter, rx_ring) : 0; in check_for_missing_completions()
4060 struct ena_ring *rx_ring; in check_for_empty_rx_ring() local
4070 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
4072 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
4073 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
4074 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
4076 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
4077 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, in check_for_empty_rx_ring()
4078 &rx_ring->syncp); in check_for_empty_rx_ring()
4083 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
4084 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
4087 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
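
The final hits are check_for_empty_rx_ring(), the watchdog behind the empty_rx_ring stat: a ring whose free-entry count equals ring_size - 1 is completely empty, and after EMPTY_RX_REFILL consecutive sightings the driver schedules NAPI so the refill path runs again. A sketch of the per-ring check:

    for (i = 0; i < adapter->num_io_queues; i++) {
        rx_ring = &adapter->rx_ring[i];

        refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
        if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
            /* ring has sat empty for another watchdog pass */
            rx_ring->empty_rx_queue++;

            if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
                ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
                                  &rx_ring->syncp);
                /* kick NAPI so ena_clean_rx_irq() can refill */
                napi_schedule(rx_ring->napi);
                rx_ring->empty_rx_queue = 0;
            }
        } else {
            rx_ring->empty_rx_queue = 0;
        }
    }
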