Lines Matching refs:rx_ring (drivers/net/ethernet/amazon/ena/ena_netdev.c)
106 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
179 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
312 static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id) in validate_rx_req_id() argument
314 if (likely(req_id < rx_ring->ring_size)) in validate_rx_req_id()
317 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, in validate_rx_req_id()
320 u64_stats_update_begin(&rx_ring->syncp); in validate_rx_req_id()
321 rx_ring->rx_stats.bad_req_id++; in validate_rx_req_id()
322 u64_stats_update_end(&rx_ring->syncp); in validate_rx_req_id()
325 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; in validate_rx_req_id()
326 set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags); in validate_rx_req_id()
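The validate_rx_req_id() hits above show the driver's defensive pattern: a request ID returned by the device is bounds-checked against the ring size before it is used to index rx_buffer_info; an out-of-range ID bumps a per-ring counter inside a u64_stats section and schedules a device reset. A minimal standalone C sketch of the same bounds check (struct and field names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    struct rx_ring {
        uint16_t ring_size;   /* number of descriptors */
        uint64_t bad_req_id;  /* per-ring error counter */
    };

    /* Return 0 if req_id can safely index the ring's buffer array. */
    static int validate_rx_req_id(struct rx_ring *r, uint16_t req_id)
    {
        if (req_id < r->ring_size)
            return 0;

        /* Device handed back an impossible ID: count it and let the
         * caller trigger recovery (the driver schedules a reset here). */
        r->bad_req_id++;
        fprintf(stderr, "invalid rx req_id %u (ring size %u)\n",
                req_id, r->ring_size);
        return -1;
    }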
339 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources() local
343 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
352 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
355 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
356 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
357 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
358 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
362 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
363 rx_ring->free_rx_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
364 if (!rx_ring->free_rx_ids) { in ena_setup_rx_resources()
365 rx_ring->free_rx_ids = vzalloc(size); in ena_setup_rx_resources()
366 if (!rx_ring->free_rx_ids) { in ena_setup_rx_resources()
367 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
373 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
374 rx_ring->free_rx_ids[i] = i; in ena_setup_rx_resources()
377 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
379 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
380 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
381 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
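The ena_setup_rx_resources() lines show two patterns worth noting: bookkeeping arrays are placed on the NUMA node of the queue's IRQ via vzalloc_node(), falling back to any-node vzalloc() rather than failing, and free_rx_ids is seeded with the identity mapping 0..ring_size-1 so every descriptor slot starts out free. A hedged kernel-style sketch of that fallback (the helper name is illustrative):

    #include <linux/types.h>
    #include <linux/vmalloc.h>

    /* Prefer memory local to `node`; fall back to any node rather than fail. */
    static u16 *alloc_free_ids(int ring_size, int node)
    {
        size_t size = sizeof(u16) * ring_size;
        u16 *ids = vzalloc_node(size, node);
        int i;

        if (!ids) {
            ids = vzalloc(size);  /* any-node fallback */
            if (!ids)
                return NULL;
        }

        for (i = 0; i < ring_size; i++)
            ids[i] = i;           /* all descriptor slots start free */

        return ids;
    }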
395 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources() local
397 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
398 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
400 vfree(rx_ring->free_rx_ids); in ena_free_rx_resources()
401 rx_ring->free_rx_ids = NULL; in ena_free_rx_resources()
445 static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, in ena_alloc_rx_page() argument
458 u64_stats_update_begin(&rx_ring->syncp); in ena_alloc_rx_page()
459 rx_ring->rx_stats.page_alloc_fail++; in ena_alloc_rx_page()
460 u64_stats_update_end(&rx_ring->syncp); in ena_alloc_rx_page()
464 dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_rx_page()
466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { in ena_alloc_rx_page()
467 u64_stats_update_begin(&rx_ring->syncp); in ena_alloc_rx_page()
468 rx_ring->rx_stats.dma_mapping_err++; in ena_alloc_rx_page()
469 u64_stats_update_end(&rx_ring->syncp); in ena_alloc_rx_page()
474 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_page()
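In ena_alloc_rx_page() each allocation step has its own failure counter: a failed page allocation bumps page_alloc_fail, a failed dma_map_page() (detected with dma_mapping_error()) bumps dma_mapping_err, each inside u64_stats_update_begin/end so the 64-bit counters read consistently on 32-bit hosts. A hedged kernel-style sketch of the mapping step (simplified: PAGE_SIZE instead of the driver's ENA_PAGE_SIZE, stats omitted):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Map a freshly allocated page for device-to-CPU DMA; 0 or -EIO. */
    static int map_rx_page(struct device *dev, struct page *page,
                           dma_addr_t *dma)
    {
        *dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, *dma))) {
            __free_page(page);  /* caller must not reuse the page */
            return -EIO;
        }
        return 0;
    }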
486 static void ena_free_rx_page(struct ena_ring *rx_ring, in ena_free_rx_page() argument
493 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
498 dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE, in ena_free_rx_page()
505 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
511 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
516 req_id = rx_ring->free_rx_ids[next_to_use]; in ena_refill_rx_bufs()
517 rc = validate_rx_req_id(rx_ring, req_id); in ena_refill_rx_bufs()
521 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
524 rc = ena_alloc_rx_page(rx_ring, rx_info, in ena_refill_rx_bufs()
527 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
529 rx_ring->qid); in ena_refill_rx_bufs()
532 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
536 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
538 rx_ring->qid); in ena_refill_rx_bufs()
542 rx_ring->ring_size); in ena_refill_rx_bufs()
546 u64_stats_update_begin(&rx_ring->syncp); in ena_refill_rx_bufs()
547 rx_ring->rx_stats.refil_partial++; in ena_refill_rx_bufs()
548 u64_stats_update_end(&rx_ring->syncp); in ena_refill_rx_bufs()
549 netdev_warn(rx_ring->netdev, in ena_refill_rx_bufs()
551 rx_ring->qid, i, num); in ena_refill_rx_bufs()
556 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
558 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
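ena_refill_rx_bufs() pops request IDs out of free_rx_ids at next_to_use, attaches a freshly mapped page to each, posts the descriptor to the submission queue, and publishes the advanced next_to_use only once, after the whole batch; if anything was posted, ena_com_write_sq_doorbell() tells the device about the new buffers. A minimal standalone C model of the index arithmetic (assumes a power-of-two ring, as the driver's IDX_NEXT macro does):

    #include <stdint.h>

    #define RING_IDX_NEXT(idx, size) (((idx) + 1) & ((size) - 1))

    struct rx_ring {
        uint16_t *free_rx_ids;
        uint16_t next_to_use;
        uint16_t ring_size;   /* must be a power of two here */
    };

    /* Consume up to `num` free IDs; returns how many were taken. */
    static int refill(struct rx_ring *r, int num,
                      int (*post_buf)(uint16_t req_id))
    {
        uint16_t next = r->next_to_use;
        int i;

        for (i = 0; i < num; i++) {
            uint16_t req_id = r->free_rx_ids[next];

            if (post_buf(req_id))  /* alloc/map/post failed: stop early */
                break;
            next = RING_IDX_NEXT(next, r->ring_size);
        }

        r->next_to_use = next;     /* publish progress once, at the end */
        return i;
    }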
566 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs() local
569 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
570 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
573 ena_free_rx_page(rx_ring, rx_info); in ena_free_rx_bufs()
583 struct ena_ring *rx_ring; in ena_refill_all_rx_bufs() local
587 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
588 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
589 rc = ena_refill_rx_bufs(rx_ring, bufs_num); in ena_refill_all_rx_bufs()
592 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
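Note that ena_refill_all_rx_bufs() posts ring_size - 1 buffers, not ring_size: keeping one slot permanently unused is the usual way to tell a full ring from an empty one when only producer and consumer indices are compared (the check at line 2814 below relies on the same convention). A tiny sketch of the disambiguation, under that assumption:

    /* A power-of-two ring with one slot sacrificed: occupancy is capped
     * at RING_SIZE - 1, so index equality unambiguously means "empty". */
    #define RING_SIZE 8
    #define IDX_NEXT(i) (((i) + 1) & (RING_SIZE - 1))

    static int ring_empty(unsigned ntu, unsigned ntc) { return ntu == ntc; }
    static int ring_full(unsigned ntu, unsigned ntc)
    {
        /* "full" one slot early, so it never collides with "empty" */
        return IDX_NEXT(ntu) == ntc;
    }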
829 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags) in ena_alloc_skb() argument
834 skb = napi_get_frags(rx_ring->napi); in ena_alloc_skb()
836 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, in ena_alloc_skb()
837 rx_ring->rx_copybreak); in ena_alloc_skb()
840 u64_stats_update_begin(&rx_ring->syncp); in ena_alloc_skb()
841 rx_ring->rx_stats.skb_alloc_fail++; in ena_alloc_skb()
842 u64_stats_update_end(&rx_ring->syncp); in ena_alloc_skb()
843 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
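ena_alloc_skb() picks the skb source by receive path: napi_get_frags() for the page-frag path, netdev_alloc_skb_ip_align() sized to rx_copybreak for the small-packet copy path, with failures counted in skb_alloc_fail. A hedged kernel-style sketch of that decision (a simplified signature, not the driver's):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static struct sk_buff *alloc_rx_skb(struct napi_struct *napi,
                                        struct net_device *netdev,
                                        unsigned int copybreak, bool frags)
    {
        /* frag path: skb owned by NAPI, data attached later as frags */
        if (frags)
            return napi_get_frags(napi);

        /* copy path: small linear skb, IP header aligned */
        return netdev_alloc_skb_ip_align(netdev, copybreak);
    }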
851 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, in ena_rx_skb() argument
863 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
866 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
871 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
879 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
880 skb = ena_alloc_skb(rx_ring, false); in ena_rx_skb()
884 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
889 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
894 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
900 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
901 rx_ring->free_rx_ids[*next_to_clean] = req_id; in ena_rx_skb()
903 rx_ring->ring_size); in ena_rx_skb()
907 skb = ena_alloc_skb(rx_ring, true); in ena_rx_skb()
912 dma_unmap_page(rx_ring->dev, in ena_rx_skb()
919 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
925 rx_ring->free_rx_ids[*next_to_clean] = req_id; in ena_rx_skb()
928 rx_ring->ring_size); in ena_rx_skb()
935 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
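The ena_rx_skb() lines show the rx_copybreak optimization: packets at or below the threshold are copied into a small linear skb after dma_sync_single_for_cpu(), and the same page mapping is handed straight back to the device with dma_sync_single_for_device() (no unmap, no reallocation); larger packets instead dma_unmap_page() and attach the page to the skb as a frag. A hedged kernel-style sketch of the copy path (buffer layout simplified, page_addr standing in for page_address() plus offset):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Small packet: copy out of the still-mapped RX page, then give the
     * same mapping back to the device for reuse. */
    static void rx_copybreak_copy(struct device *dev, struct sk_buff *skb,
                                  void *page_addr, dma_addr_t dma, u16 len)
    {
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
        skb_put_data(skb, page_addr, len);
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
    }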
946 static inline void ena_rx_checksum(struct ena_ring *rx_ring, in ena_rx_checksum() argument
951 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
967 u64_stats_update_begin(&rx_ring->syncp); in ena_rx_checksum()
968 rx_ring->rx_stats.bad_csum++; in ena_rx_checksum()
969 u64_stats_update_end(&rx_ring->syncp); in ena_rx_checksum()
970 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
980 u64_stats_update_begin(&rx_ring->syncp); in ena_rx_checksum()
981 rx_ring->rx_stats.bad_csum++; in ena_rx_checksum()
982 u64_stats_update_end(&rx_ring->syncp); in ena_rx_checksum()
983 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
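ena_rx_checksum() only trusts the device's checksum verdict when the netdev actually has NETIF_F_RXCSUM enabled; a bad L3/L4 checksum bumps bad_csum and leaves the skb at CHECKSUM_NONE so the stack re-verifies, while a good, checked L4 checksum earns CHECKSUM_UNNECESSARY. A hedged sketch with a simplified completion descriptor (the rx_csum_info fields are illustrative stand-ins for the hardware context):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct rx_csum_info {        /* stand-in for hw completion fields */
        bool l3_csum_err;
        bool l4_csum_err;
        bool l4_csum_checked;
    };

    static void set_rx_checksum(struct net_device *netdev,
                                struct sk_buff *skb,
                                const struct rx_csum_info *ci)
    {
        skb->ip_summed = CHECKSUM_NONE;

        if (!(netdev->features & NETIF_F_RXCSUM))
            return;              /* offload disabled: stack checks */

        if (ci->l3_csum_err || ci->l4_csum_err)
            return;              /* hw says bad: stack rechecks/drops */

        if (ci->l4_csum_checked)
            skb->ip_summed = CHECKSUM_UNNECESSARY;
    }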
993 static void ena_set_rx_hash(struct ena_ring *rx_ring, in ena_set_rx_hash() argument
999 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1022 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, in ena_clean_rx_irq() argument
1025 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1038 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1039 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1043 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1044 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1046 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1047 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1055 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1057 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1061 skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs, in ena_clean_rx_irq()
1067 rx_ring->free_tx_ids[next_to_clean] = in ena_clean_rx_irq()
1068 rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1071 rx_ring->ring_size); in ena_clean_rx_irq()
1076 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1078 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1080 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1082 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { in ena_clean_rx_irq()
1083 total_len += rx_ring->ena_bufs[0].len; in ena_clean_rx_irq()
1095 rx_ring->per_napi_bytes += total_len; in ena_clean_rx_irq()
1096 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1097 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1098 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1099 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1100 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1101 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1103 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1105 refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1106 refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; in ena_clean_rx_irq()
1110 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); in ena_clean_rx_irq()
1111 ena_refill_rx_bufs(rx_ring, refill_required); in ena_clean_rx_irq()
1117 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1119 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1120 rx_ring->rx_stats.bad_desc_num++; in ena_clean_rx_irq()
1121 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
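ena_clean_rx_irq() is the NAPI RX loop: pull completed descriptors with ena_com_rx_pkt(), build an skb per packet, and refill the device early rather than waiting for the ring to drain, namely once the submission queue's empty space exceeds ring_size / ENA_RX_REFILL_THRESH_DIVIDER. (The rx_ring->free_tx_ids at line 1067 is verbatim from the source; in this version of the driver free_tx_ids and free_rx_ids appear to share an anonymous union in struct ena_ring, so they alias the same array.) A standalone C sketch of the threshold decision, with the divider value assumed to be 8:

    #include <stdbool.h>
    #include <stdint.h>

    /* Refill once device-visible free space exceeds a fraction of the
     * ring, amortizing doorbell writes (divider assumed to be 8). */
    #define RX_REFILL_THRESH_DIVIDER 8

    static bool should_refill(uint32_t sq_empty_space, uint32_t ring_size)
    {
        return sq_empty_space > ring_size / RX_REFILL_THRESH_DIVIDER;
    }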
1130 inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, in ena_adjust_intr_moderation() argument
1136 ena_com_calculate_interrupt_delay(rx_ring->ena_dev, in ena_adjust_intr_moderation()
1137 rx_ring->per_napi_packets, in ena_adjust_intr_moderation()
1138 rx_ring->per_napi_bytes, in ena_adjust_intr_moderation()
1139 &rx_ring->smoothed_interval, in ena_adjust_intr_moderation()
1140 &rx_ring->moder_tbl_idx); in ena_adjust_intr_moderation()
1145 rx_ring->per_napi_packets = 0; in ena_adjust_intr_moderation()
1146 rx_ring->per_napi_bytes = 0; in ena_adjust_intr_moderation()
1150 struct ena_ring *rx_ring) in ena_unmask_interrupt() argument
1158 rx_ring->smoothed_interval, in ena_unmask_interrupt()
1166 ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); in ena_unmask_interrupt()
1170 struct ena_ring *rx_ring) in ena_update_ring_numa_node() argument
1184 ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1188 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1198 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1207 rx_ring = ena_napi->rx_ring; in ena_io_poll()
1218 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); in ena_io_poll()
1236 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
1237 ena_adjust_intr_moderation(rx_ring, tx_ring); in ena_io_poll()
1239 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
1242 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
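ena_io_poll() services one TX ring and one RX ring per NAPI instance; only when the RX work finishes under budget does it complete NAPI, optionally run adaptive interrupt moderation, and unmask the IRQ pair. A hedged skeleton of that shape (the clean_*/unmask callbacks are placeholders, and the driver's TX budgeting is omitted):

    #include <linux/netdevice.h>

    /* The general shape of a combined TX/RX NAPI poll handler. */
    static int io_poll(struct napi_struct *napi, int budget,
                       int (*clean_tx)(void), int (*clean_rx)(int budget),
                       void (*unmask_irq)(void))
    {
        int rx_done;

        clean_tx();               /* TX completions are not budgeted */
        rx_done = clean_rx(budget);

        if (rx_done < budget) {   /* ring drained: re-arm the IRQ */
            napi_complete_done(napi, rx_done);
            unmask_irq();
        }
        return rx_done;
    }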
1279 ena_napi->rx_ring->first_interrupt = true; in ena_intr_msix_io()
1506 napi->rx_ring = &adapter->rx_ring[i]; in ena_init_napi()
1661 struct ena_ring *rx_ring; in ena_create_io_rx_queue() local
1668 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
1677 ctx.numa_node = cpu_to_node(rx_ring->cpu); in ena_create_io_rx_queue()
1688 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
1689 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
1698 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
1778 &adapter->rx_ring[i]); in ena_up()
2293 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
2315 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
2318 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); in ena_get_stats64()
2319 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
2320 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
2321 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); in ena_get_stats64()
2659 struct ena_ring *rx_ring) in check_for_rx_interrupt_queue() argument
2661 if (likely(rx_ring->first_interrupt)) in check_for_rx_interrupt_queue()
2664 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
2667 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
2669 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
2672 rx_ring->qid); in check_for_rx_interrupt_queue()
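check_for_rx_interrupt_queue() is part of the keep-alive watchdog: a ring whose completion queue is non-empty but which has never fired its first interrupt accumulates no_interrupt_event_cnt, and on reaching ENA_MAX_NO_INTERRUPT_ITERATIONS the driver declares the interrupt lost. A standalone C model of the counter logic (the threshold name is the driver's, its value is assumed here):

    #include <stdbool.h>

    #define MAX_NO_INTERRUPT_ITERATIONS 3  /* assumed value */

    struct rx_watch {
        bool first_interrupt_seen;
        int  no_interrupt_event_cnt;
    };

    /* Called periodically; true when a reset should be triggered. */
    static bool rx_interrupt_missing(struct rx_watch *w, bool cq_empty)
    {
        if (w->first_interrupt_seen)
            return false;          /* IRQ is alive, nothing to do */
        if (cq_empty)
            return false;          /* no work pending, no IRQ expected */

        return ++w->no_interrupt_event_cnt == MAX_NO_INTERRUPT_ITERATIONS;
    }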
2745 struct ena_ring *rx_ring; in check_for_missing_completions() local
2764 rx_ring = &adapter->rx_ring[i]; in check_for_missing_completions()
2770 rc = check_for_rx_interrupt_queue(adapter, rx_ring); in check_for_missing_completions()
2800 struct ena_ring *rx_ring; in check_for_empty_rx_ring() local
2810 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
2813 ena_com_sq_empty_space(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
2814 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
2815 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
2817 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
2818 u64_stats_update_begin(&rx_ring->syncp); in check_for_empty_rx_ring()
2819 rx_ring->rx_stats.empty_rx_ring++; in check_for_empty_rx_ring()
2820 u64_stats_update_end(&rx_ring->syncp); in check_for_empty_rx_ring()
2825 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
2826 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
2829 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
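Finally, check_for_empty_rx_ring() catches a starved RX ring: SQ empty space equal to ring_size - 1 means the device holds no buffers at all (the one-slot-unused convention from the refill path), and after EMPTY_RX_REFILL consecutive observations the driver bumps empty_rx_ring and kicks the queue's NAPI to force a refill, then restarts the count. A standalone C model of the detector (the EMPTY_RX_REFILL value is assumed):

    #include <stdbool.h>

    #define EMPTY_RX_REFILL 2      /* assumed value */

    /* True when NAPI should be kicked to refill a starved ring. */
    static bool rx_ring_starved(int *empty_cnt,
                                unsigned int sq_empty_space,
                                unsigned int ring_size)
    {
        if (sq_empty_space != ring_size - 1) {
            *empty_cnt = 0;        /* device still owns some buffers */
            return false;
        }

        if (++(*empty_cnt) < EMPTY_RX_REFILL)
            return false;

        *empty_cnt = 0;            /* kick NAPI and restart the count */
        return true;
    }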