Home
last modified time | relevance | path

Searched refs: rx_ring (Results 1 – 25 of 230) sorted by relevance

1 2 3 4 5 6 7 8 9 10

/Linux-v5.4/drivers/net/ethernet/intel/ixgbe/
Dixgbe_xsk.c81 reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); in ixgbe_xsk_umem_enable()
143 struct ixgbe_ring *rx_ring, in ixgbe_run_xdp_zc() argument
146 struct xdp_umem *umem = rx_ring->xsk_umem; in ixgbe_run_xdp_zc()
154 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp_zc()
172 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ixgbe_run_xdp_zc()
179 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp_zc()
190 ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, in ixgbe_get_rx_buffer_zc() argument
195 bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_get_rx_buffer_zc()
198 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbe_get_rx_buffer_zc()
206 static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, in ixgbe_reuse_rx_buffer_zc() argument
[all …]
Dixgbe_txrx_common.h17 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
20 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
38 void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
40 struct ixgbe_ring *rx_ring,
42 void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
Dixgbe_main.c576 struct ixgbe_ring *rx_ring; in ixgbe_dump() local
715 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
717 n, rx_ring->next_to_use, rx_ring->next_to_clean); in ixgbe_dump()
772 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
774 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in ixgbe_dump()
785 for (i = 0; i < rx_ring->count; i++) { in ixgbe_dump()
788 if (i == rx_ring->next_to_use) in ixgbe_dump()
790 else if (i == rx_ring->next_to_clean) in ixgbe_dump()
795 rx_buffer_info = &rx_ring->rx_buffer_info[i]; in ixgbe_dump()
796 rx_desc = IXGBE_RX_DESC(rx_ring, i); in ixgbe_dump()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/i40e/
Di40e_xsk.c191 static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) in i40e_run_xdp_zc() argument
193 struct xdp_umem *umem = rx_ring->xsk_umem; in i40e_run_xdp_zc()
204 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in i40e_run_xdp_zc()
214 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp_zc()
218 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp_zc()
225 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp_zc()
245 static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, in i40e_alloc_buffer_zc() argument
248 struct xdp_umem *umem = rx_ring->xsk_umem; in i40e_alloc_buffer_zc()
253 rx_ring->rx_stats.page_reuse_count++; in i40e_alloc_buffer_zc()
258 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_buffer_zc()
[all …]
Di40e_txrx.c530 void i40e_fd_handle_status(struct i40e_ring *rx_ring, in i40e_fd_handle_status() argument
533 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
1205 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, in i40e_reuse_rx_page() argument
1209 u16 nta = rx_ring->next_to_alloc; in i40e_reuse_rx_page()
1211 new_buff = &rx_ring->rx_bi[nta]; in i40e_reuse_rx_page()
1215 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in i40e_reuse_rx_page()
1223 rx_ring->rx_stats.page_reuse_count++; in i40e_reuse_rx_page()
1261 struct i40e_ring *rx_ring, in i40e_clean_programming_status() argument
1272 ntc = rx_ring->next_to_clean; in i40e_clean_programming_status()
1275 rx_buffer = &rx_ring->rx_bi[ntc++]; in i40e_clean_programming_status()
[all …]
Di40e_txrx_common.h7 void i40e_fd_handle_status(struct i40e_ring *rx_ring,
11 struct i40e_ring *rx_ring,
14 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
17 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
20 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
21 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
87 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
/Linux-v5.4/drivers/staging/qlge/
Dqlge_main.c1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) in ql_get_curr_lbuf() argument
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; in ql_get_curr_lbuf()
1034 rx_ring->lbq_curr_idx++; in ql_get_curr_lbuf()
1035 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) in ql_get_curr_lbuf()
1036 rx_ring->lbq_curr_idx = 0; in ql_get_curr_lbuf()
1037 rx_ring->lbq_free_cnt++; in ql_get_curr_lbuf()
1042 struct rx_ring *rx_ring) in ql_get_curr_lchunk() argument
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); in ql_get_curr_lchunk()
1048 rx_ring->lbq_buf_size, in ql_get_curr_lchunk()
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) in ql_get_curr_lchunk()
[all …]
Dqlge_dbg.c1628 DUMP_QDEV_FIELD(qdev, "%p", rx_ring); in ql_dump_qdev()
1733 void ql_dump_rx_ring(struct rx_ring *rx_ring) in ql_dump_rx_ring() argument
1735 if (rx_ring == NULL) in ql_dump_rx_ring()
1738 rx_ring->cq_id); in ql_dump_rx_ring()
1740 rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", in ql_dump_rx_ring()
1741 rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", in ql_dump_rx_ring()
1742 rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); in ql_dump_rx_ring()
1743 pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); in ql_dump_rx_ring()
1744 pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); in ql_dump_rx_ring()
1746 (unsigned long long) rx_ring->cq_base_dma); in ql_dump_rx_ring()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/iavf/
Diavf_txrx.c655 void iavf_clean_rx_ring(struct iavf_ring *rx_ring) in iavf_clean_rx_ring() argument
661 if (!rx_ring->rx_bi) in iavf_clean_rx_ring()
664 if (rx_ring->skb) { in iavf_clean_rx_ring()
665 dev_kfree_skb(rx_ring->skb); in iavf_clean_rx_ring()
666 rx_ring->skb = NULL; in iavf_clean_rx_ring()
670 for (i = 0; i < rx_ring->count; i++) { in iavf_clean_rx_ring()
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; in iavf_clean_rx_ring()
679 dma_sync_single_range_for_cpu(rx_ring->dev, in iavf_clean_rx_ring()
682 rx_ring->rx_buf_len, in iavf_clean_rx_ring()
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in iavf_clean_rx_ring()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/ice/
Dice_txrx.c267 void ice_clean_rx_ring(struct ice_ring *rx_ring) in ice_clean_rx_ring() argument
269 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
273 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
277 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
278 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
303 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); in ice_clean_rx_ring()
306 memset(rx_ring->desc, 0, rx_ring->size); in ice_clean_rx_ring()
308 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
309 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
310 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
[all …]
/Linux-v5.4/drivers/net/ethernet/amazon/ena/
Dena_netdev.c105 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
177 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
329 static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id) in validate_rx_req_id() argument
331 if (likely(req_id < rx_ring->ring_size)) in validate_rx_req_id()
334 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, in validate_rx_req_id()
337 u64_stats_update_begin(&rx_ring->syncp); in validate_rx_req_id()
338 rx_ring->rx_stats.bad_req_id++; in validate_rx_req_id()
339 u64_stats_update_end(&rx_ring->syncp); in validate_rx_req_id()
342 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; in validate_rx_req_id()
343 set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags); in validate_rx_req_id()
[all …]
/Linux-v5.4/drivers/net/ethernet/netronome/nfp/
Dnfp_net_debugfs.c14 struct nfp_net_rx_ring *rx_ring; in nfp_rx_q_show() local
23 if (!r_vec->nfp_net || !r_vec->rx_ring) in nfp_rx_q_show()
26 rx_ring = r_vec->rx_ring; in nfp_rx_q_show()
30 rxd_cnt = rx_ring->cnt; in nfp_rx_q_show()
32 fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl); in nfp_rx_q_show()
33 fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl); in nfp_rx_q_show()
36 rx_ring->idx, rx_ring->fl_qcidx, in nfp_rx_q_show()
37 rx_ring->cnt, &rx_ring->dma, rx_ring->rxds, in nfp_rx_q_show()
38 rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p); in nfp_rx_q_show()
41 rxd = &rx_ring->rxds[i]; in nfp_rx_q_show()
[all …]
Dnfp_net_common.c594 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, in nfp_net_rx_ring_init() argument
599 rx_ring->idx = idx; in nfp_net_rx_ring_init()
600 rx_ring->r_vec = r_vec; in nfp_net_rx_ring_init()
601 u64_stats_init(&rx_ring->r_vec->rx_sync); in nfp_net_rx_ring_init()
603 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; in nfp_net_rx_ring_init()
604 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); in nfp_net_rx_ring_init()
1437 struct nfp_net_rx_ring *rx_ring, in nfp_net_rx_give_one() argument
1442 wr_idx = D_IDX(rx_ring, rx_ring->wr_p); in nfp_net_rx_give_one()
1447 rx_ring->rxbufs[wr_idx].frag = frag; in nfp_net_rx_give_one()
1448 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr; in nfp_net_rx_give_one()
[all …]
/Linux-v5.4/drivers/net/ethernet/freescale/enetc/
Denetc.c266 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
281 work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); in enetc_poll()
410 static bool enetc_new_page(struct enetc_bdr *rx_ring, in enetc_new_page() argument
420 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in enetc_new_page()
421 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { in enetc_new_page()
434 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) in enetc_refill_rx_ring() argument
440 i = rx_ring->next_to_use; in enetc_refill_rx_ring()
441 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
442 rxbd = ENETC_RXBD(*rx_ring, i); in enetc_refill_rx_ring()
447 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { in enetc_refill_rx_ring()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/igbvf/
Dnetdev.c100 napi_gro_receive(&adapter->rx_ring->napi, skb); in igbvf_receive_skb()
133 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, in igbvf_alloc_rx_buffers() argument
136 struct igbvf_adapter *adapter = rx_ring->adapter; in igbvf_alloc_rx_buffers()
145 i = rx_ring->next_to_use; in igbvf_alloc_rx_buffers()
146 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
154 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); in igbvf_alloc_rx_buffers()
212 if (i == rx_ring->count) in igbvf_alloc_rx_buffers()
214 buffer_info = &rx_ring->buffer_info[i]; in igbvf_alloc_rx_buffers()
218 if (rx_ring->next_to_use != i) { in igbvf_alloc_rx_buffers()
219 rx_ring->next_to_use = i; in igbvf_alloc_rx_buffers()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/fm10k/
Dfm10k_main.c70 static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, in fm10k_alloc_mapped_page() argument
83 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
88 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); in fm10k_alloc_mapped_page()
93 if (dma_mapping_error(rx_ring->dev, dma)) { in fm10k_alloc_mapped_page()
96 rx_ring->rx_stats.alloc_failed++; in fm10k_alloc_mapped_page()
112 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) in fm10k_alloc_rx_buffers() argument
116 u16 i = rx_ring->next_to_use; in fm10k_alloc_rx_buffers()
122 rx_desc = FM10K_RX_DESC(rx_ring, i); in fm10k_alloc_rx_buffers()
123 bi = &rx_ring->rx_buffer[i]; in fm10k_alloc_rx_buffers()
124 i -= rx_ring->count; in fm10k_alloc_rx_buffers()
[all …]
Dfm10k_netdev.c83 int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring) in fm10k_setup_rx_resources() argument
85 struct device *dev = rx_ring->dev; in fm10k_setup_rx_resources()
88 size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; in fm10k_setup_rx_resources()
90 rx_ring->rx_buffer = vzalloc(size); in fm10k_setup_rx_resources()
91 if (!rx_ring->rx_buffer) in fm10k_setup_rx_resources()
94 u64_stats_init(&rx_ring->syncp); in fm10k_setup_rx_resources()
97 rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc); in fm10k_setup_rx_resources()
98 rx_ring->size = ALIGN(rx_ring->size, 4096); in fm10k_setup_rx_resources()
100 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in fm10k_setup_rx_resources()
101 &rx_ring->dma, GFP_KERNEL); in fm10k_setup_rx_resources()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/ixgbevf/
Dixgbevf_main.c115 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
508 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, in ixgbevf_process_skb_fields() argument
512 ixgbevf_rx_hash(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
513 ixgbevf_rx_checksum(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
517 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
524 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); in ixgbevf_process_skb_fields()
526 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
530 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring, in ixgbevf_get_rx_buffer() argument
535 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
539 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/igc/
Digc_main.c69 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
356 static void igc_clean_rx_ring(struct igc_ring *rx_ring) in igc_clean_rx_ring() argument
358 u16 i = rx_ring->next_to_clean; in igc_clean_rx_ring()
360 dev_kfree_skb(rx_ring->skb); in igc_clean_rx_ring()
361 rx_ring->skb = NULL; in igc_clean_rx_ring()
364 while (i != rx_ring->next_to_alloc) { in igc_clean_rx_ring()
365 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igc_clean_rx_ring()
370 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_clean_rx_ring()
373 igc_rx_bufsz(rx_ring), in igc_clean_rx_ring()
377 dma_unmap_page_attrs(rx_ring->dev, in igc_clean_rx_ring()
[all …]
/Linux-v5.4/drivers/net/ethernet/oki-semi/pch_gbe/
Dpch_gbe_main.c579 adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev, in pch_gbe_alloc_queues()
580 sizeof(*adapter->rx_ring), GFP_KERNEL); in pch_gbe_alloc_queues()
581 if (!adapter->rx_ring) in pch_gbe_alloc_queues()
855 (unsigned long long)adapter->rx_ring->dma, in pch_gbe_configure_rx()
856 adapter->rx_ring->size); in pch_gbe_configure_rx()
874 rdba = adapter->rx_ring->dma; in pch_gbe_configure_rx()
875 rdlen = adapter->rx_ring->size - 0x10; in pch_gbe_configure_rx()
959 struct pch_gbe_rx_ring *rx_ring) in pch_gbe_clean_rx_ring() argument
967 for (i = 0; i < rx_ring->count; i++) { in pch_gbe_clean_rx_ring()
968 buffer_info = &rx_ring->buffer_info[i]; in pch_gbe_clean_rx_ring()
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/e1000e/
Dnetdev.c208 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000e_dump() local
324 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
360 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
362 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
363 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); in e1000e_dump()
368 if (i == rx_ring->next_to_use) in e1000e_dump()
370 else if (i == rx_ring->next_to_clean) in e1000e_dump()
426 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
429 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
430 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); in e1000e_dump()
[all …]
/Linux-v5.4/drivers/net/ethernet/agere/
Det131x.c288 struct rx_ring { struct
492 struct rx_ring rx_ring; member
740 struct rx_ring *rx_ring = &adapter->rx_ring; in et131x_rx_dma_enable() local
742 if (rx_ring->fbr[1]->buffsize == 4096) in et131x_rx_dma_enable()
744 else if (rx_ring->fbr[1]->buffsize == 8192) in et131x_rx_dma_enable()
746 else if (rx_ring->fbr[1]->buffsize == 16384) in et131x_rx_dma_enable()
750 if (rx_ring->fbr[0]->buffsize == 256) in et131x_rx_dma_enable()
752 else if (rx_ring->fbr[0]->buffsize == 512) in et131x_rx_dma_enable()
754 else if (rx_ring->fbr[0]->buffsize == 1024) in et131x_rx_dma_enable()
1543 struct rx_ring *rx_local = &adapter->rx_ring; in et131x_config_rx_dma_regs()
[all …]
/Linux-v5.4/drivers/net/wireless/ath/ath10k/
Dhtt_rx.c31 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) in ath10k_htt_rx_find_skb_paddr()
46 if (htt->rx_ring.in_ord_rx) { in ath10k_htt_rx_ring_free()
47 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { in ath10k_htt_rx_ring_free()
56 for (i = 0; i < htt->rx_ring.size; i++) { in ath10k_htt_rx_ring_free()
57 skb = htt->rx_ring.netbufs_ring[i]; in ath10k_htt_rx_ring_free()
69 htt->rx_ring.fill_cnt = 0; in ath10k_htt_rx_ring_free()
70 hash_init(htt->rx_ring.skb_table); in ath10k_htt_rx_ring_free()
71 memset(htt->rx_ring.netbufs_ring, 0, in ath10k_htt_rx_ring_free()
72 htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); in ath10k_htt_rx_ring_free()
77 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); in ath10k_htt_get_rx_ring_size_32()
[all …]
/Linux-v5.4/drivers/net/wireless/realtek/rtw88/
Dpci.c128 struct rtw_pci_rx_ring *rx_ring) in rtw_pci_free_rx_ring_skbs() argument
136 for (i = 0; i < rx_ring->r.len; i++) { in rtw_pci_free_rx_ring_skbs()
137 skb = rx_ring->buf[i]; in rtw_pci_free_rx_ring_skbs()
144 rx_ring->buf[i] = NULL; in rtw_pci_free_rx_ring_skbs()
149 struct rtw_pci_rx_ring *rx_ring) in rtw_pci_free_rx_ring() argument
152 u8 *head = rx_ring->r.head; in rtw_pci_free_rx_ring()
153 int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; in rtw_pci_free_rx_ring()
155 rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring); in rtw_pci_free_rx_ring()
157 pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma); in rtw_pci_free_rx_ring()
164 struct rtw_pci_rx_ring *rx_ring; in rtw_pci_free_trx_ring() local
[all …]
/Linux-v5.4/drivers/net/ethernet/intel/igb/
Digb_main.c366 struct igb_ring *rx_ring; in igb_dump() local
471 rx_ring = adapter->rx_ring[n]; in igb_dump()
473 n, rx_ring->next_to_use, rx_ring->next_to_clean); in igb_dump()
504 rx_ring = adapter->rx_ring[n]; in igb_dump()
506 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); in igb_dump()
511 for (i = 0; i < rx_ring->count; i++) { in igb_dump()
514 buffer_info = &rx_ring->rx_buffer_info[i]; in igb_dump()
515 rx_desc = IGB_RX_DESC(rx_ring, i); in igb_dump()
519 if (i == rx_ring->next_to_use) in igb_dump()
521 else if (i == rx_ring->next_to_clean) in igb_dump()
[all …]

1 2 3 4 5 6 7 8 9 10