
Searched refs:rx_ring (Results 1 – 25 of 254) sorted by relevance


/Linux-v5.15/drivers/net/ethernet/intel/ice/
ice_txrx.c:373 void ice_clean_rx_ring(struct ice_ring *rx_ring) in ice_clean_rx_ring() argument
375 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
379 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
382 if (rx_ring->skb) { in ice_clean_rx_ring()
383 dev_kfree_skb(rx_ring->skb); in ice_clean_rx_ring()
384 rx_ring->skb = NULL; in ice_clean_rx_ring()
387 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
388 ice_xsk_clean_rx_ring(rx_ring); in ice_clean_rx_ring()
393 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
394 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
[all …]
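The ice_clean_rx_ring() hits above show a teardown order that recurs across the drivers in these results: bail out if the buffer array was never allocated, free any half-assembled skb, hand XSK-pool rings to the zero-copy cleanup path, then walk every buffer slot. A minimal userspace sketch of that shape, using hypothetical stand-in types rather than the real ice structures:

#include <stdlib.h>
#include <stdbool.h>

struct rx_buf { void *page; };

struct rx_ring {
    struct rx_buf *rx_buf;  /* one entry per descriptor */
    void *skb;              /* partially assembled packet, if any */
    bool zero_copy;         /* stands in for rx_ring->xsk_pool */
    unsigned int count;
};

static void xsk_clean_rx_ring(struct rx_ring *r) { (void)r; /* ZC path */ }

static void clean_rx_ring(struct rx_ring *r)
{
    if (!r->rx_buf)                 /* ring was never set up */
        return;

    free(r->skb);                   /* dev_kfree_skb() in the kernel */
    r->skb = NULL;

    if (r->zero_copy) {             /* if (rx_ring->xsk_pool) */
        xsk_clean_rx_ring(r);
        return;
    }

    for (unsigned int i = 0; i < r->count; i++) {
        free(r->rx_buf[i].page);    /* DMA unmap + page free in the driver */
        r->rx_buf[i].page = NULL;
    }
}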
ice_xsk.c:70 ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring, in ice_qvec_dis_irq() argument
82 reg = rx_ring->reg_idx; in ice_qvec_dis_irq()
147 struct ice_ring *tx_ring, *rx_ring; in ice_qp_dis() local
156 rx_ring = vsi->rx_rings[q_idx]; in ice_qp_dis()
157 q_vector = rx_ring->q_vector; in ice_qp_dis()
167 ice_qvec_dis_irq(vsi, rx_ring, q_vector); in ice_qp_dis()
204 struct ice_ring *tx_ring, *rx_ring; in ice_qp_ena() local
220 rx_ring = vsi->rx_rings[q_idx]; in ice_qp_ena()
221 q_vector = rx_ring->q_vector; in ice_qp_ena()
239 err = ice_vsi_cfg_rxq(rx_ring); in ice_qp_ena()
[all …]
ice_txrx_lib.c:11 void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val) in ice_release_rx_desc() argument
13 u16 prev_ntu = rx_ring->next_to_use & ~0x7; in ice_release_rx_desc()
15 rx_ring->next_to_use = val; in ice_release_rx_desc()
18 rx_ring->next_to_alloc = val; in ice_release_rx_desc()
33 writel(val, rx_ring->tail); in ice_release_rx_desc()
69 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, in ice_rx_hash() argument
75 if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) in ice_rx_hash()
181 ice_process_skb_fields(struct ice_ring *rx_ring, in ice_process_skb_fields() argument
185 ice_rx_hash(rx_ring, rx_desc, skb, ptype); in ice_process_skb_fields()
188 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ice_process_skb_fields()
[all …]
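ice_release_rx_desc() above is the tail-bump step that follows buffer refill: record the new next_to_use and next_to_alloc, then publish the value to the NIC's tail register with an MMIO write. The & ~0x7 masking in the hit suggests the tail is only written when it moves by a full stride of eight descriptors; treat that reading as an assumption here. A compilable sketch:

#include <stdint.h>

struct rx_ring {
    uint16_t next_to_use;
    uint16_t next_to_alloc;
    volatile uint32_t *tail;    /* MMIO tail register in the real driver */
};

static void release_rx_desc(struct rx_ring *r, uint16_t val)
{
    uint16_t prev_ntu = r->next_to_use & ~0x7;  /* last published value, rounded down to 8 */

    r->next_to_use = val;
    r->next_to_alloc = val;    /* no buffers are in flight past this point */

    /* The driver issues a write barrier (wmb()) before writel() so the
     * device never sees the tail move ahead of the descriptor writes. */
    if ((uint16_t)(val & ~0x7) != prev_ntu)
        *r->tail = val;        /* writel(val, rx_ring->tail) */
}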
ice_xsk.h:14 int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
17 bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
19 void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
31 ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring, in ice_clean_rx_irq_zc() argument
45 ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring, in ice_alloc_rx_bufs_zc() argument
63 static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { } in ice_xsk_clean_rx_ring() argument
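The ice_xsk.h hits show the standard kernel header pattern for optional features: real prototypes when the feature (here CONFIG_XDP_SOCKETS) is compiled in, and static inline no-op stubs otherwise, so call sites never need #ifdef. A generic sketch with a made-up config symbol and feature name:

struct ring;    /* opaque to callers */

#ifdef CONFIG_MY_FEATURE
int my_feature_clean_rx_irq(struct ring *rx_ring, int budget);
void my_feature_clean_rx_ring(struct ring *rx_ring);
#else
static inline int my_feature_clean_rx_irq(struct ring *rx_ring, int budget)
{
    (void)rx_ring;    /* __always_unused in the kernel headers */
    (void)budget;
    return 0;         /* nothing to poll when compiled out */
}

static inline void my_feature_clean_rx_ring(struct ring *rx_ring) { (void)rx_ring; }
#endif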
/Linux-v5.15/drivers/net/ethernet/intel/i40e/
i40e_xsk.c:13 int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) in i40e_alloc_rx_bi_zc() argument
15 unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; in i40e_alloc_rx_bi_zc()
17 rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL); in i40e_alloc_rx_bi_zc()
18 return rx_ring->rx_bi_zc ? 0 : -ENOMEM; in i40e_alloc_rx_bi_zc()
21 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) in i40e_clear_rx_bi_zc() argument
23 memset(rx_ring->rx_bi_zc, 0, in i40e_clear_rx_bi_zc()
24 sizeof(*rx_ring->rx_bi_zc) * rx_ring->count); in i40e_clear_rx_bi_zc()
27 static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) in i40e_rx_bi() argument
29 return &rx_ring->rx_bi_zc[idx]; in i40e_rx_bi()
149 static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) in i40e_run_xdp_zc() argument
[all …]
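i40e_alloc_rx_bi_zc() and i40e_clear_rx_bi_zc() above are a plain sized-array lifecycle: one xdp_buff pointer per ring entry, zero-initialized, with -ENOMEM on failure. A sketch using calloc() where the driver uses kzalloc(..., GFP_KERNEL):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct rx_ring {
    void **rx_bi_zc;     /* struct xdp_buff ** in i40e */
    unsigned int count;  /* ring entries */
};

static int alloc_rx_bi_zc(struct rx_ring *r)
{
    r->rx_bi_zc = calloc(r->count, sizeof(*r->rx_bi_zc));
    return r->rx_bi_zc ? 0 : -ENOMEM;
}

static void clear_rx_bi_zc(struct rx_ring *r)
{
    memset(r->rx_bi_zc, 0, sizeof(*r->rx_bi_zc) * r->count);
}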
i40e_txrx.c:685 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw, in i40e_fd_handle_status() argument
688 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
1355 static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) in i40e_rx_bi() argument
1357 return &rx_ring->rx_bi[idx]; in i40e_rx_bi()
1367 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, in i40e_reuse_rx_page() argument
1371 u16 nta = rx_ring->next_to_alloc; in i40e_reuse_rx_page()
1373 new_buff = i40e_rx_bi(rx_ring, nta); in i40e_reuse_rx_page()
1377 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in i40e_reuse_rx_page()
1385 rx_ring->rx_stats.page_reuse_count++; in i40e_reuse_rx_page()
1403 void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, in i40e_clean_programming_status() argument
[all …]
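i40e_reuse_rx_page() above recycles a page the stack has finished with into the slot at next_to_alloc instead of freeing it, then advances the index with a wrap at the end of the ring and bumps a reuse counter. A sketch of just that index arithmetic, with hypothetical stand-in types:

#include <stdint.h>

struct rx_buffer { void *page; };

struct rx_ring {
    struct rx_buffer *bufs;
    uint16_t next_to_alloc;
    uint16_t count;
    unsigned long page_reuse_count;
};

static void reuse_rx_page(struct rx_ring *r, const struct rx_buffer *old)
{
    uint16_t nta = r->next_to_alloc + 1;

    r->bufs[r->next_to_alloc] = *old;               /* recycle into the free slot */
    r->next_to_alloc = (nta < r->count) ? nta : 0;  /* wrap past the last entry */
    r->page_reuse_count++;
}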
i40e_txrx_common.h:8 void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
10 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
13 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
16 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
17 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
102 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
/Linux-v5.15/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c:98 struct ixgbe_ring *rx_ring, in ixgbe_run_xdp_zc() argument
106 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp_zc()
110 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ixgbe_run_xdp_zc()
132 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp_zc()
141 bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) in ixgbe_alloc_rx_buffers_zc() argument
145 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers_zc()
153 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_alloc_rx_buffers_zc()
154 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers_zc()
155 i -= rx_ring->count; in ixgbe_alloc_rx_buffers_zc()
158 bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ixgbe_alloc_rx_buffers_zc()
[all …]
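ixgbe_run_xdp_zc() above is the usual XDP verdict dispatch: READ_ONCE() the attached program, run it on the frame, then branch on the returned action, tracing anything unexpected as an exception. A self-contained sketch of the dispatch, with a stub standing in for the real BPF hook:

enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };
enum rx_verdict { RX_DROPPED, RX_CONSUMED, RX_PASS_TO_STACK };

/* Stub for running the attached BPF program on one frame. */
static enum xdp_action run_prog(const void *frame)
{
    (void)frame;
    return XDP_PASS;
}

static enum rx_verdict run_xdp(const void *frame)
{
    switch (run_prog(frame)) {
    case XDP_PASS:
        return RX_PASS_TO_STACK;    /* the driver builds an skb next */
    case XDP_TX:
    case XDP_REDIRECT:
        return RX_CONSUMED;         /* frame leaves via another queue */
    default:                        /* unknown verdict from the program */
    case XDP_ABORTED:               /* trace_xdp_exception() in the driver */
    case XDP_DROP:
        return RX_DROPPED;
    }
}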
ixgbe_main.c:574 struct ixgbe_ring *rx_ring; in ixgbe_dump() local
713 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
715 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
770 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
772 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
783 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
786 if (i == rx_ring->next_to_use) in ixgbe_dump()
788 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
793 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
794 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_dump()
[all …]
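ixgbe_dump() above (and the igc_rings_dump()/e1000e_dump() hits below) walks the ring and marks each descriptor line according to whether its index is next_to_use or next_to_clean. A sketch of that marker selection; the label strings are illustrative, not the driver's exact output:

static const char *ring_marker(unsigned int i, unsigned int ntu, unsigned int ntc)
{
    if (i == ntu)
        return " NTU";    /* next slot software will hand to the NIC */
    if (i == ntc)
        return " NTC";    /* next slot software will reclaim */
    return "";
}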
ixgbe_txrx_common.h:17 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
20 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
39 bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
41 struct ixgbe_ring *rx_ring,
43 void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
/Linux-v5.15/drivers/net/ethernet/intel/iavf/
iavf_txrx.c:655 void iavf_clean_rx_ring(struct iavf_ring *rx_ring) in iavf_clean_rx_ring() argument
661 if (!rx_ring->rx_bi) in iavf_clean_rx_ring()
664 if (rx_ring->skb) { in iavf_clean_rx_ring()
665 dev_kfree_skb(rx_ring->skb); in iavf_clean_rx_ring()
666 rx_ring->skb = NULL; in iavf_clean_rx_ring()
670 for (i = 0; i < rx_ring->count; i++) { in iavf_clean_rx_ring()
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; in iavf_clean_rx_ring()
679 dma_sync_single_range_for_cpu(rx_ring->dev, in iavf_clean_rx_ring()
682 rx_ring->rx_buf_len, in iavf_clean_rx_ring()
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in iavf_clean_rx_ring()
[all …]
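iavf_clean_rx_ring() above shows the DMA discipline the Intel drivers share: sync each buffer back to the CPU before touching it, then unmap it and release the page. The kernel-only DMA calls are left as comments in this sketch:

#include <stdlib.h>

struct rx_buffer {
    void *page;
    unsigned long long dma;    /* bus address from dma_map_page() */
};

struct rx_ring {
    struct rx_buffer *rx_bi;
    unsigned int count;
};

static void clean_rx_ring(struct rx_ring *r)
{
    if (!r->rx_bi)
        return;

    for (unsigned int i = 0; i < r->count; i++) {
        struct rx_buffer *b = &r->rx_bi[i];

        if (!b->page)
            continue;
        /* dma_sync_single_range_for_cpu(dev, b->dma, off, len, DMA_FROM_DEVICE); */
        /* dma_unmap_page_attrs(dev, b->dma, size, DMA_FROM_DEVICE, attrs); */
        free(b->page);    /* __free_page() in the driver */
        b->page = NULL;
    }
}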
/Linux-v5.15/drivers/net/ethernet/freescale/enetc/
enetc.c:18 if (priv->rx_ring[i]->xdp.prog) in enetc_num_stack_tx_queues()
29 return priv->rx_ring[index]; in enetc_rx_ring_from_xdp_tx_ring()
430 v->rx_ring.stats.packets, in enetc_rx_net_dim()
431 v->rx_ring.stats.bytes, in enetc_rx_net_dim()
448 static void enetc_reuse_page(struct enetc_bdr *rx_ring, in enetc_reuse_page() argument
453 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; in enetc_reuse_page()
456 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); in enetc_reuse_page()
498 struct enetc_bdr *rx_ring; in enetc_recycle_xdp_tx_buff() local
500 rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring); in enetc_recycle_xdp_tx_buff()
502 if (likely(enetc_swbd_unused(rx_ring))) { in enetc_recycle_xdp_tx_buff()
[all …]
/Linux-v5.15/drivers/net/ethernet/amazon/ena/
ena_netdev.c:72 struct ena_ring *rx_ring);
74 struct ena_ring *rx_ring);
117 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
353 if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) in ena_xdp_xmit()
379 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) in ena_xdp_execute() argument
387 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); in ena_xdp_execute()
398 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); in ena_xdp_execute()
399 xdp_stat = &rx_ring->rx_stats.xdp_aborted; in ena_xdp_execute()
405 xdp_ring = rx_ring->xdp_ring; in ena_xdp_execute()
410 if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, in ena_xdp_execute()
[all …]
/Linux-v5.15/drivers/staging/qlge/
qlge_main.c:967 struct rx_ring *rx_ring) in qlge_get_curr_lchunk() argument
969 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq); in qlge_get_curr_lchunk()
985 static void qlge_update_cq(struct rx_ring *rx_ring) in qlge_update_cq() argument
987 rx_ring->cnsmr_idx++; in qlge_update_cq()
988 rx_ring->curr_entry++; in qlge_update_cq()
989 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { in qlge_update_cq()
990 rx_ring->cnsmr_idx = 0; in qlge_update_cq()
991 rx_ring->curr_entry = rx_ring->cq_base; in qlge_update_cq()
995 static void qlge_write_cq_idx(struct rx_ring *rx_ring) in qlge_write_cq_idx() argument
997 qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); in qlge_write_cq_idx()
[all …]
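qlge_update_cq() and qlge_write_cq_idx() above split completion-queue progress into two steps: advance the consumer index and current-entry pointer, wrapping at cq_len, then publish the index through a doorbell register. A sketch of that wrap logic, with a hypothetical entry layout:

#include <stdint.h>

struct cq_entry { uint64_t data[4]; };      /* hypothetical completion layout */

struct rx_ring {
    uint32_t cnsmr_idx;                     /* next entry to consume */
    uint32_t cq_len;
    struct cq_entry *cq_base;
    struct cq_entry *curr_entry;
    volatile uint32_t *cnsmr_idx_db_reg;    /* doorbell MMIO register */
};

static void update_cq(struct rx_ring *r)
{
    r->cnsmr_idx++;
    r->curr_entry++;
    if (r->cnsmr_idx == r->cq_len) {        /* wrapped past the last entry */
        r->cnsmr_idx = 0;
        r->curr_entry = r->cq_base;
    }
}

static void write_cq_idx(struct rx_ring *r)
{
    *r->cnsmr_idx_db_reg = r->cnsmr_idx;    /* qlge_write_db_reg() in the driver */
}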
/Linux-v5.15/drivers/net/ethernet/netronome/nfp/
nfp_net_debugfs.c:14 struct nfp_net_rx_ring *rx_ring; in nfp_rx_q_show() local
23 if (!r_vec->nfp_net || !r_vec->rx_ring) in nfp_rx_q_show()
26 rx_ring = r_vec->rx_ring; in nfp_rx_q_show()
30 rxd_cnt = rx_ring->cnt; in nfp_rx_q_show()
32 fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl); in nfp_rx_q_show()
33 fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl); in nfp_rx_q_show()
36 rx_ring->idx, rx_ring->fl_qcidx, in nfp_rx_q_show()
37 rx_ring->cnt, &rx_ring->dma, rx_ring->rxds, in nfp_rx_q_show()
38 rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p); in nfp_rx_q_show()
41 rxd = &rx_ring->rxds[i]; in nfp_rx_q_show()
[all …]
nfp_net_common.c:601 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, in nfp_net_rx_ring_init() argument
606 rx_ring->idx = idx; in nfp_net_rx_ring_init()
607 rx_ring->r_vec = r_vec; in nfp_net_rx_ring_init()
608 u64_stats_init(&rx_ring->r_vec->rx_sync); in nfp_net_rx_ring_init()
610 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; in nfp_net_rx_ring_init()
611 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); in nfp_net_rx_ring_init()
1439 struct nfp_net_rx_ring *rx_ring, in nfp_net_rx_give_one() argument
1444 wr_idx = D_IDX(rx_ring, rx_ring->wr_p); in nfp_net_rx_give_one()
1449 rx_ring->rxbufs[wr_idx].frag = frag; in nfp_net_rx_give_one()
1450 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr; in nfp_net_rx_give_one()
[all …]
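nfp_net_rx_give_one() above uses free-running ring pointers: wr_p increments without ever wrapping, and D_IDX() masks it down to a slot, which only works when the ring size is a power of two. A sketch of that indexing:

#include <stdint.h>

struct rx_buf { void *frag; uint64_t dma_addr; };

struct rx_ring {
    struct rx_buf *rxbufs;
    uint32_t cnt;     /* ring size; must be a power of two */
    uint32_t wr_p;    /* free-running write pointer */
};

/* Mask a free-running pointer down to a ring slot (D_IDX() in nfp). */
static uint32_t d_idx(const struct rx_ring *r, uint32_t p)
{
    return p & (r->cnt - 1);
}

static void rx_give_one(struct rx_ring *r, void *frag, uint64_t dma_addr)
{
    uint32_t wr_idx = d_idx(r, r->wr_p);

    r->rxbufs[wr_idx].frag = frag;
    r->rxbufs[wr_idx].dma_addr = dma_addr;
    r->wr_p++;    /* never wrapped explicitly; the mask handles it */
}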
/Linux-v5.15/drivers/net/ethernet/broadcom/
bcm4908_enet.c:75 struct bcm4908_enet_dma_ring rx_ring; member
188 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; in bcm4908_enet_dma_free() local
192 size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd); in bcm4908_enet_dma_free()
193 if (rx_ring->cpu_addr) in bcm4908_enet_dma_free()
194 dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr); in bcm4908_enet_dma_free()
195 kfree(rx_ring->slots); in bcm4908_enet_dma_free()
206 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; in bcm4908_enet_dma_alloc() local
220 rx_ring->length = ENET_RX_BDS_NUM; in bcm4908_enet_dma_alloc()
221 rx_ring->is_tx = 0; in bcm4908_enet_dma_alloc()
222 rx_ring->cfg_block = ENET_DMA_CH_RX_CFG; in bcm4908_enet_dma_alloc()
[all …]
/Linux-v5.15/drivers/net/ethernet/intel/igc/
igc_xdp.c:42 struct igc_ring *rx_ring, *tx_ring; in igc_xdp_enable_pool() local
70 rx_ring = adapter->rx_ring[queue_id]; in igc_xdp_enable_pool()
73 napi = &rx_ring->q_vector->napi; in igc_xdp_enable_pool()
76 igc_disable_rx_ring(rx_ring); in igc_xdp_enable_pool()
81 set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); in igc_xdp_enable_pool()
86 igc_enable_rx_ring(rx_ring); in igc_xdp_enable_pool()
101 struct igc_ring *rx_ring, *tx_ring; in igc_xdp_disable_pool() local
116 rx_ring = adapter->rx_ring[queue_id]; in igc_xdp_disable_pool()
119 napi = &rx_ring->q_vector->napi; in igc_xdp_disable_pool()
122 igc_disable_rx_ring(rx_ring); in igc_xdp_disable_pool()
[all …]
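igc_xdp_enable_pool() and igc_xdp_disable_pool() above follow a quiesce-reconfigure-resume sequence: disable the RX ring if the interface is up, flip the IGC_RING_FLAG_AF_XDP_ZC flag, then re-enable the ring, so the datapath never sees a half-switched queue. A sketch of that sequencing with hypothetical helpers:

#include <stdbool.h>

#define RING_FLAG_AF_XDP_ZC (1UL << 0)    /* stands in for IGC_RING_FLAG_AF_XDP_ZC */

struct rx_ring { unsigned long flags; bool running; };

static void disable_rx_ring(struct rx_ring *r) { r->running = false; }
static void enable_rx_ring(struct rx_ring *r)  { r->running = true; }

static void set_zc_pool(struct rx_ring *r, bool enable, bool if_running)
{
    if (if_running)
        disable_rx_ring(r);    /* stop the datapath touching the ring */

    if (enable)
        r->flags |= RING_FLAG_AF_XDP_ZC;
    else
        r->flags &= ~RING_FLAG_AF_XDP_ZC;

    if (if_running)
        enable_rx_ring(r);     /* datapath resumes on the new setup */
}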
igc_dump.c:119 struct igc_ring *rx_ring; in igc_rings_dump() local
214 rx_ring = adapter->rx_ring[n]; in igc_rings_dump()
215 netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, in igc_rings_dump()
216 rx_ring->next_to_clean); in igc_rings_dump()
247 rx_ring = adapter->rx_ring[n]; in igc_rings_dump()
250 rx_ring->queue_index); in igc_rings_dump()
255 for (i = 0; i < rx_ring->count; i++) { in igc_rings_dump()
259 buffer_info = &rx_ring->rx_buffer_info[i]; in igc_rings_dump()
260 rx_desc = IGC_RX_DESC(rx_ring, i); in igc_rings_dump()
264 if (i == rx_ring->next_to_use) in igc_rings_dump()
[all …]
/Linux-v5.15/drivers/net/ethernet/intel/fm10k/
fm10k_main.c:67 static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, in fm10k_alloc_mapped_page() argument
80 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
85 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in fm10k_alloc_mapped_page()
90 if (dma_mapping_error(rx_ring->dev, dma)) { in fm10k_alloc_mapped_page()
93 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
109 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) in fm10k_alloc_rx_buffers() argument
113 u16 i = rx_ring->next_to_use; in fm10k_alloc_rx_buffers()
119 rx_desc = FM10K_RX_DESC(rx_ring, i); in fm10k_alloc_rx_buffers()
120 bi = &rx_ring->rx_buffer[i]; in fm10k_alloc_rx_buffers()
121 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
[all …]
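fm10k_alloc_rx_buffers() above (like ixgbe_alloc_rx_buffers_zc() earlier) walks the refill loop with the negative-index trick: subtract count from i once, advance descriptor and buffer pointers together, and rely on unsigned wraparound so i hits zero exactly at the end of the ring. A sketch of the loop skeleton, with the actual refill reduced to a stand-in:

#include <stdint.h>

struct rx_desc { uint64_t addr; };
struct rx_buffer { uint64_t dma; };

struct rx_ring {
    struct rx_desc *desc_base;
    struct rx_buffer *rx_buffer;
    uint16_t next_to_use;
    uint16_t count;
};

static void alloc_rx_buffers(struct rx_ring *r, uint16_t cleaned_count)
{
    uint16_t i = r->next_to_use;
    struct rx_desc *rx_desc = &r->desc_base[i];
    struct rx_buffer *bi = &r->rx_buffer[i];

    i -= r->count;    /* unsigned wraparound: i reaches 0 at the ring end */

    while (cleaned_count--) {
        rx_desc->addr = bi->dma;    /* stand-in for mapping a fresh page */
        rx_desc++;
        bi++;
        i++;
        if (!i) {                   /* wrapped: restart at slot 0 */
            rx_desc = r->desc_base;
            bi = r->rx_buffer;
            i -= r->count;
        }
    }

    r->next_to_use = i + r->count;  /* back to a real ring index */
}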
fm10k_netdev.c:83 int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring) in fm10k_setup_rx_resources() argument
85 struct device *dev = rx_ring->dev; in fm10k_setup_rx_resources()
88 size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; in fm10k_setup_rx_resources()
90 rx_ring->rx_buffer = vzalloc(size); in fm10k_setup_rx_resources()
91 if (!rx_ring->rx_buffer) in fm10k_setup_rx_resources()
94 u64_stats_init(&rx_ring->syncp); in fm10k_setup_rx_resources()
97 rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc); in fm10k_setup_rx_resources()
98 rx_ring->size = ALIGN(rx_ring->size, 4096); in fm10k_setup_rx_resources()
100 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in fm10k_setup_rx_resources()
101 &rx_ring->dma, GFP_KERNEL); in fm10k_setup_rx_resources()
[all …]
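fm10k_setup_rx_resources() above is the canonical ring bring-up: a zeroed software buffer array (vzalloc in the kernel), then a descriptor area rounded up to a 4 KiB boundary and allocated coherently for the device. A userspace sketch with aligned_alloc() standing in for dma_alloc_coherent(), and a made-up descriptor size:

#include <errno.h>
#include <stdlib.h>

#define DESC_SIZE 16u    /* stand-in for sizeof(union fm10k_rx_desc) */

struct rx_buffer { void *page; };

struct rx_ring {
    struct rx_buffer *rx_buffer;    /* per-descriptor software state */
    void *desc;                     /* descriptor area shared with the NIC */
    unsigned int count;
    size_t size;
};

static int setup_rx_resources(struct rx_ring *r)
{
    r->rx_buffer = calloc(r->count, sizeof(*r->rx_buffer));    /* vzalloc() */
    if (!r->rx_buffer)
        return -ENOMEM;

    r->size = r->count * DESC_SIZE;
    r->size = (r->size + 4095) & ~(size_t)4095;    /* ALIGN(size, 4096) */

    r->desc = aligned_alloc(4096, r->size);    /* dma_alloc_coherent() */
    if (!r->desc) {
        free(r->rx_buffer);
        r->rx_buffer = NULL;
        return -ENOMEM;
    }
    return 0;
}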
/Linux-v5.15/drivers/net/ethernet/intel/igbvf/
netdev.c:100 napi_gro_receive(&adapter->rx_ring->napi, skb); in igbvf_receive_skb()
133 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, in igbvf_alloc_rx_buffers() argument
136 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_alloc_rx_buffers()
145 i = rx_ring->next_to_use; in igbvf_alloc_rx_buffers()
146 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
154 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); in igbvf_alloc_rx_buffers()
212 if (i == rx_ring->count) in igbvf_alloc_rx_buffers()
214 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
218 if (rx_ring->next_to_use != i) { in igbvf_alloc_rx_buffers()
219 rx_ring->next_to_use = i; in igbvf_alloc_rx_buffers()
[all …]
/Linux-v5.15/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c:112 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
506 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, in ixgbevf_process_skb_fields() argument
510 ixgbevf_rx_hash(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
511 ixgbevf_rx_checksum(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
515 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
522 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
524 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
528 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_get_rx_buffer() argument
533 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
537 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
[all …]
/Linux-v5.15/drivers/net/ethernet/oki-semi/pch_gbe/
pch_gbe_main.c:560 adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev, in pch_gbe_alloc_queues()
561 sizeof(*adapter->rx_ring), GFP_KERNEL); in pch_gbe_alloc_queues()
562 if (!adapter->rx_ring) in pch_gbe_alloc_queues()
836 (unsigned long long)adapter->rx_ring->dma, in pch_gbe_configure_rx()
837 adapter->rx_ring->size); in pch_gbe_configure_rx()
855 rdba = adapter->rx_ring->dma; in pch_gbe_configure_rx()
856 rdlen = adapter->rx_ring->size - 0x10; in pch_gbe_configure_rx()
940 struct pch_gbe_rx_ring *rx_ring) in pch_gbe_clean_rx_ring() argument
948 for (i = 0; i < rx_ring->count; i++) { in pch_gbe_clean_rx_ring()
949 buffer_info = &rx_ring->buffer_info[i]; in pch_gbe_clean_rx_ring()
[all …]
/Linux-v5.15/drivers/net/ethernet/intel/e1000e/
netdev.c:204 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000e_dump() local
320 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
356 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
358 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
359 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); in e1000e_dump()
364 if (i == rx_ring->next_to_use) in e1000e_dump()
366 else if (i == rx_ring->next_to_clean) in e1000e_dump()
422 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
425 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
426 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); in e1000e_dump()
[all …]
