/Linux-v4.19/drivers/net/ethernet/qlogic/qlge/ |
D | qlge_main.c |
    1031  static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)  in ql_get_curr_lbuf() argument
    1033  struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];  in ql_get_curr_lbuf()
    1034  rx_ring->lbq_curr_idx++;  in ql_get_curr_lbuf()
    1035  if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)  in ql_get_curr_lbuf()
    1036  rx_ring->lbq_curr_idx = 0;  in ql_get_curr_lbuf()
    1037  rx_ring->lbq_free_cnt++;  in ql_get_curr_lbuf()
    1042  struct rx_ring *rx_ring)  in ql_get_curr_lchunk() argument
    1044  struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);  in ql_get_curr_lchunk()
    1048  rx_ring->lbq_buf_size,  in ql_get_curr_lchunk()
    1054  if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)  in ql_get_curr_lchunk()
    [all …]
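
The ql_get_curr_lbuf() hits above show the circular large-buffer-queue walk: return the descriptor at lbq_curr_idx, advance the index, wrap it back to zero at lbq_len, and count the slot as freeable. Below is a minimal user-space sketch of that wraparound pattern; the struct and field names echo the hits, but the code is an illustration, not the qlge driver itself.

    #include <stdio.h>

    #define LBQ_LEN 4

    struct bq_desc {
        int id;                        /* stand-in for the real descriptor contents */
    };

    struct rx_ring {
        struct bq_desc lbq[LBQ_LEN];   /* large buffer queue */
        unsigned int lbq_curr_idx;     /* next descriptor to hand out */
        unsigned int lbq_len;          /* number of descriptors in the ring */
        unsigned int lbq_free_cnt;     /* slots the consumer has released */
    };

    /* Return the current descriptor and advance the index, wrapping at lbq_len. */
    static struct bq_desc *get_curr_lbuf(struct rx_ring *rx_ring)
    {
        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

        rx_ring->lbq_curr_idx++;
        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
            rx_ring->lbq_curr_idx = 0;
        rx_ring->lbq_free_cnt++;
        return lbq_desc;
    }

    int main(void)
    {
        struct rx_ring ring = { .lbq_len = LBQ_LEN };
        int i;

        for (i = 0; i < LBQ_LEN; i++)
            ring.lbq[i].id = i;

        /* Walking six slots wraps back around to descriptors 0 and 1. */
        for (i = 0; i < 6; i++)
            printf("got descriptor %d (free_cnt=%u)\n",
                   get_curr_lbuf(&ring)->id, ring.lbq_free_cnt);
        return 0;
    }

The same advance-and-wrap idiom recurs in most of the rings below, written either as a compare against the ring length or as a power-of-two mask.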
|
D | qlge_dbg.c |
    1628  DUMP_QDEV_FIELD(qdev, "%p", rx_ring);  in ql_dump_qdev()
    1733  void ql_dump_rx_ring(struct rx_ring *rx_ring)  in ql_dump_rx_ring() argument
    1735  if (rx_ring == NULL)  in ql_dump_rx_ring()
    1738  rx_ring->cq_id);  in ql_dump_rx_ring()
    1740  rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",  in ql_dump_rx_ring()
    1741  rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",  in ql_dump_rx_ring()
    1742  rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");  in ql_dump_rx_ring()
    1743  pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);  in ql_dump_rx_ring()
    1744  pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);  in ql_dump_rx_ring()
    1746  (unsigned long long) rx_ring->cq_base_dma);  in ql_dump_rx_ring()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/intel/ice/ |
D | ice_txrx.c |
    266  void ice_clean_rx_ring(struct ice_ring *rx_ring)  in ice_clean_rx_ring() argument
    268  struct device *dev = rx_ring->dev;  in ice_clean_rx_ring()
    273  if (!rx_ring->rx_buf)  in ice_clean_rx_ring()
    277  for (i = 0; i < rx_ring->count; i++) {  in ice_clean_rx_ring()
    278  struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];  in ice_clean_rx_ring()
    294  size = sizeof(struct ice_rx_buf) * rx_ring->count;  in ice_clean_rx_ring()
    295  memset(rx_ring->rx_buf, 0, size);  in ice_clean_rx_ring()
    298  memset(rx_ring->desc, 0, rx_ring->size);  in ice_clean_rx_ring()
    300  rx_ring->next_to_alloc = 0;  in ice_clean_rx_ring()
    301  rx_ring->next_to_clean = 0;  in ice_clean_rx_ring()
    [all …]
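
ice_clean_rx_ring() above follows the cleanup shape most of the Intel drivers in this list share: return early if the buffer array was never allocated, release every buffer, zero both the buffer array and the descriptor memory, and reset the ring indices. A hedged, self-contained sketch of that shape, with generic names and plain malloc/free standing in for the kernel's DMA and skb handling:

    #include <stdlib.h>
    #include <string.h>

    struct rx_buf {
        void *data;                 /* stand-in for the page/skb and its DMA mapping */
    };

    struct rx_ring {
        struct rx_buf *rx_buf;      /* one entry per descriptor */
        void *desc;                 /* descriptor memory */
        size_t size;                /* bytes of descriptor memory */
        unsigned int count;         /* number of descriptors */
        unsigned int next_to_alloc;
        unsigned int next_to_clean;
        unsigned int next_to_use;
    };

    /* Release all buffers and return the ring to its just-allocated state. */
    static void clean_rx_ring(struct rx_ring *rx_ring)
    {
        unsigned int i;

        if (!rx_ring->rx_buf)       /* ring was never set up */
            return;

        for (i = 0; i < rx_ring->count; i++) {
            struct rx_buf *buf = &rx_ring->rx_buf[i];

            free(buf->data);        /* a real driver unmaps DMA before freeing */
            buf->data = NULL;
        }

        memset(rx_ring->rx_buf, 0, sizeof(struct rx_buf) * rx_ring->count);
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
    }

    int main(void)
    {
        struct rx_ring ring = { .count = 4 };
        unsigned int i;

        ring.rx_buf = calloc(ring.count, sizeof(*ring.rx_buf));
        ring.size = ring.count * 16;            /* pretend 16-byte descriptors */
        ring.desc = calloc(1, ring.size);
        if (!ring.rx_buf || !ring.desc)
            return 1;

        for (i = 0; i < ring.count; i++)        /* give every slot a buffer */
            ring.rx_buf[i].data = malloc(2048);

        clean_rx_ring(&ring);                   /* frees buffers, zeroes state */

        free(ring.rx_buf);
        free(ring.desc);
        return 0;
    }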
|
/Linux-v4.19/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c |
    656  void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)  in i40evf_clean_rx_ring() argument
    662  if (!rx_ring->rx_bi)  in i40evf_clean_rx_ring()
    665  if (rx_ring->skb) {  in i40evf_clean_rx_ring()
    666  dev_kfree_skb(rx_ring->skb);  in i40evf_clean_rx_ring()
    667  rx_ring->skb = NULL;  in i40evf_clean_rx_ring()
    671  for (i = 0; i < rx_ring->count; i++) {  in i40evf_clean_rx_ring()
    672  struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];  in i40evf_clean_rx_ring()
    680  dma_sync_single_range_for_cpu(rx_ring->dev,  in i40evf_clean_rx_ring()
    683  rx_ring->rx_buf_len,  in i40evf_clean_rx_ring()
    687  dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,  in i40evf_clean_rx_ring()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/amazon/ena/ |
D | ena_netdev.c |
    106  adapter->rx_ring[i].mtu = mtu;  in update_rx_ring_mtu()
    179  rxr = &adapter->rx_ring[i];  in ena_init_io_rings()
    312  static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)  in validate_rx_req_id() argument
    314  if (likely(req_id < rx_ring->ring_size))  in validate_rx_req_id()
    317  netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,  in validate_rx_req_id()
    320  u64_stats_update_begin(&rx_ring->syncp);  in validate_rx_req_id()
    321  rx_ring->rx_stats.bad_req_id++;  in validate_rx_req_id()
    322  u64_stats_update_end(&rx_ring->syncp);  in validate_rx_req_id()
    325  rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;  in validate_rx_req_id()
    326  set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);  in validate_rx_req_id()
    [all …]
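
validate_rx_req_id() above is a defensive bounds check: a completion's req_id must index inside the ring, otherwise the driver counts a bad_req_id, records a reset reason, and asks for a device reset. A simplified sketch of that check, with plain counters standing in for the kernel's u64_stats and netif_err machinery (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum reset_reason { RESET_NONE, RESET_INV_RX_REQ_ID };

    struct rx_ring {
        uint16_t ring_size;             /* number of slots in the ring */
        unsigned long bad_req_id;       /* stats counter */
        enum reset_reason reset_reason;
        int trigger_reset;              /* stand-in for the adapter's flag bit */
    };

    /* Returns 0 if req_id is usable, -1 if it points outside the ring. */
    static int validate_rx_req_id(struct rx_ring *rx_ring, uint16_t req_id)
    {
        if (req_id < rx_ring->ring_size)
            return 0;

        fprintf(stderr, "invalid rx req_id %u\n", (unsigned)req_id);
        rx_ring->bad_req_id++;
        rx_ring->reset_reason = RESET_INV_RX_REQ_ID;
        rx_ring->trigger_reset = 1;     /* the service task would then reset the device */
        return -1;
    }

    int main(void)
    {
        struct rx_ring ring = { .ring_size = 1024 };

        printf("req 17:   %d\n", validate_rx_req_id(&ring, 17));
        printf("req 4096: %d\n", validate_rx_req_id(&ring, 4096));
        printf("bad_req_id=%lu trigger_reset=%d\n",
               ring.bad_req_id, ring.trigger_reset);
        return 0;
    }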
|
/Linux-v4.19/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c |
    539   static void i40e_fd_handle_status(struct i40e_ring *rx_ring,  in i40e_fd_handle_status() argument
    542   struct i40e_pf *pf = rx_ring->vsi->back;  in i40e_fd_handle_status()
    1230  static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,  in i40e_reuse_rx_page() argument
    1234  u16 nta = rx_ring->next_to_alloc;  in i40e_reuse_rx_page()
    1236  new_buff = &rx_ring->rx_bi[nta];  in i40e_reuse_rx_page()
    1240  rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;  in i40e_reuse_rx_page()
    1279  static void i40e_clean_programming_status(struct i40e_ring *rx_ring,  in i40e_clean_programming_status() argument
    1284  u32 ntc = rx_ring->next_to_clean;  in i40e_clean_programming_status()
    1288  rx_buffer = &rx_ring->rx_bi[ntc++];  in i40e_clean_programming_status()
    1289  ntc = (ntc < rx_ring->count) ? ntc : 0;  in i40e_clean_programming_status()
    [all …]
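
i40e_reuse_rx_page() above recycles a still-referenced page into the slot at next_to_alloc, advancing that index with the same compare-against-count wrap used for next_to_clean in i40e_clean_programming_status(). A stripped-down sketch of the index bookkeeping (field names are illustrative, and the buffer is simply copied rather than page-ref-counted):

    #include <stdint.h>
    #include <stdio.h>

    struct rx_buffer {
        void *page;                    /* stand-in for the recycled page */
        unsigned int page_offset;
    };

    struct rx_ring {
        struct rx_buffer rx_bi[8];
        uint16_t count;                /* ring size, here 8 */
        uint16_t next_to_alloc;        /* where the next recycled buffer goes */
    };

    /* Copy an old buffer into the next-to-alloc slot and advance with wraparound. */
    static void reuse_rx_page(struct rx_ring *rx_ring, struct rx_buffer *old_buff)
    {
        uint16_t nta = rx_ring->next_to_alloc;
        struct rx_buffer *new_buff = &rx_ring->rx_bi[nta];

        *new_buff = *old_buff;         /* hand the page and its offset to the new slot */

        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
    }

    int main(void)
    {
        struct rx_ring ring = { .count = 8, .next_to_alloc = 6 };
        struct rx_buffer buf = { .page = (void *)0x1000, .page_offset = 2048 };
        int i;

        for (i = 0; i < 3; i++) {      /* fills slots 6, 7, 0: wraps after the last slot */
            reuse_rx_page(&ring, &buf);
            printf("next_to_alloc=%u\n", (unsigned)ring.next_to_alloc);
        }
        return 0;
    }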
|
/Linux-v4.19/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_debugfs.c |
    44  struct nfp_net_rx_ring *rx_ring;  in nfp_net_debugfs_rx_q_read() local
    53  if (!r_vec->nfp_net || !r_vec->rx_ring)  in nfp_net_debugfs_rx_q_read()
    56  rx_ring = r_vec->rx_ring;  in nfp_net_debugfs_rx_q_read()
    60  rxd_cnt = rx_ring->cnt;  in nfp_net_debugfs_rx_q_read()
    62  fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);  in nfp_net_debugfs_rx_q_read()
    63  fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);  in nfp_net_debugfs_rx_q_read()
    66  rx_ring->idx, rx_ring->fl_qcidx,  in nfp_net_debugfs_rx_q_read()
    67  rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,  in nfp_net_debugfs_rx_q_read()
    68  rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);  in nfp_net_debugfs_rx_q_read()
    71  rxd = &rx_ring->rxds[i];  in nfp_net_debugfs_rx_q_read()
    [all …]
|
D | nfp_net_common.c |
    554   nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,  in nfp_net_rx_ring_init() argument
    559   rx_ring->idx = idx;  in nfp_net_rx_ring_init()
    560   rx_ring->r_vec = r_vec;  in nfp_net_rx_ring_init()
    561   u64_stats_init(&rx_ring->r_vec->rx_sync);  in nfp_net_rx_ring_init()
    563   rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;  in nfp_net_rx_ring_init()
    564   rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);  in nfp_net_rx_ring_init()
    1266  struct nfp_net_rx_ring *rx_ring,  in nfp_net_rx_give_one() argument
    1271  wr_idx = D_IDX(rx_ring, rx_ring->wr_p);  in nfp_net_rx_give_one()
    1276  rx_ring->rxbufs[wr_idx].frag = frag;  in nfp_net_rx_give_one()
    1277  rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;  in nfp_net_rx_give_one()
    [all …]
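
nfp_net_rx_give_one() above turns the free-running write pointer wr_p into a slot index with D_IDX(). The macro body is not part of these hits; the sketch below assumes the common power-of-two trick of masking the pointer with cnt - 1, which lets the rd_p/wr_p counters grow without bound while the derived index wraps:

    #include <stdint.h>
    #include <stdio.h>

    struct rx_buf {
        void *frag;
        uint64_t dma_addr;
    };

    struct rx_ring {
        struct rx_buf rxbufs[512];
        uint32_t cnt;                  /* ring size; must be a power of two for the mask */
        uint32_t wr_p;                 /* free-running write pointer, never reset */
    };

    /* Map a free-running pointer to a ring slot (power-of-two cnt only). */
    #define D_IDX(ring, ptr) ((ptr) & ((ring)->cnt - 1))

    /* Hand one freshly allocated buffer back to the device's freelist. */
    static void rx_give_one(struct rx_ring *rx_ring, void *frag, uint64_t dma_addr)
    {
        uint32_t wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

        rx_ring->rxbufs[wr_idx].frag = frag;
        rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
        rx_ring->wr_p++;               /* the slot index wraps via the mask, not here */
    }

    int main(void)
    {
        struct rx_ring ring = { .cnt = 512, .wr_p = 510 };
        int i;

        for (i = 0; i < 4; i++) {      /* lands in slots 510, 511, 0, 1 */
            rx_give_one(&ring, NULL, 0);
            printf("wr_p=%u -> slot %u\n",
                   (unsigned)(ring.wr_p - 1),
                   (unsigned)D_IDX(&ring, ring.wr_p - 1));
        }
        return 0;
    }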
|
/Linux-v4.19/drivers/net/ethernet/intel/igbvf/ |
D | netdev.c |
    100  napi_gro_receive(&adapter->rx_ring->napi, skb);  in igbvf_receive_skb()
    133  static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,  in igbvf_alloc_rx_buffers() argument
    136  struct igbvf_adapter *adapter = rx_ring->adapter;  in igbvf_alloc_rx_buffers()
    145  i = rx_ring->next_to_use;  in igbvf_alloc_rx_buffers()
    146  buffer_info = &rx_ring->buffer_info[i];  in igbvf_alloc_rx_buffers()
    154  rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);  in igbvf_alloc_rx_buffers()
    212  if (i == rx_ring->count)  in igbvf_alloc_rx_buffers()
    214  buffer_info = &rx_ring->buffer_info[i];  in igbvf_alloc_rx_buffers()
    218  if (rx_ring->next_to_use != i) {  in igbvf_alloc_rx_buffers()
    219  rx_ring->next_to_use = i;  in igbvf_alloc_rx_buffers()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c |
    68   static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,  in fm10k_alloc_mapped_page() argument
    81   rx_ring->rx_stats.alloc_failed++;  in fm10k_alloc_mapped_page()
    86   dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);  in fm10k_alloc_mapped_page()
    91   if (dma_mapping_error(rx_ring->dev, dma)) {  in fm10k_alloc_mapped_page()
    94   rx_ring->rx_stats.alloc_failed++;  in fm10k_alloc_mapped_page()
    110  void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)  in fm10k_alloc_rx_buffers() argument
    114  u16 i = rx_ring->next_to_use;  in fm10k_alloc_rx_buffers()
    120  rx_desc = FM10K_RX_DESC(rx_ring, i);  in fm10k_alloc_rx_buffers()
    121  bi = &rx_ring->rx_buffer[i];  in fm10k_alloc_rx_buffers()
    122  i -= rx_ring->count;  in fm10k_alloc_rx_buffers()
    [all …]
|
D | fm10k_netdev.c |
    83   int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)  in fm10k_setup_rx_resources() argument
    85   struct device *dev = rx_ring->dev;  in fm10k_setup_rx_resources()
    88   size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;  in fm10k_setup_rx_resources()
    90   rx_ring->rx_buffer = vzalloc(size);  in fm10k_setup_rx_resources()
    91   if (!rx_ring->rx_buffer)  in fm10k_setup_rx_resources()
    94   u64_stats_init(&rx_ring->syncp);  in fm10k_setup_rx_resources()
    97   rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);  in fm10k_setup_rx_resources()
    98   rx_ring->size = ALIGN(rx_ring->size, 4096);  in fm10k_setup_rx_resources()
    100  rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,  in fm10k_setup_rx_resources()
    101  &rx_ring->dma, GFP_KERNEL);  in fm10k_setup_rx_resources()
    [all …]
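
fm10k_setup_rx_resources() above sizes the descriptor area as count * sizeof(descriptor) and then rounds it up to a 4096-byte boundary before handing it to dma_alloc_coherent(); the rounding is the kernel's ALIGN() macro. A user-space sketch of the same arithmetic, with an illustrative 16-byte descriptor rather than fm10k's real layout:

    #include <stdio.h>
    #include <stddef.h>

    /* Round x up to the next multiple of a (a must be a power of two),
     * mirroring what the kernel's ALIGN() macro does. */
    #define ALIGN_UP(x, a) (((x) + (size_t)(a) - 1) & ~((size_t)(a) - 1))

    /* Illustrative 16-byte receive descriptor, not fm10k's real one. */
    union rx_desc {
        struct { unsigned long long addr, meta; } d;
        unsigned char raw[16];
    };

    int main(void)
    {
        size_t count = 250;                        /* descriptors per ring */
        size_t size = count * sizeof(union rx_desc);

        printf("raw size     = %zu bytes\n", size);
        size = ALIGN_UP(size, 4096);               /* pad to whole pages for the HW */
        printf("aligned size = %zu bytes\n", size);
        return 0;
    }

With 250 of these 16-byte descriptors the raw 4000 bytes round up to 4096, so the ring occupies a whole number of pages.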
|
/Linux-v4.19/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c |
    114  static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
    504  static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,  in ixgbevf_process_skb_fields() argument
    508  ixgbevf_rx_hash(rx_ring, rx_desc, skb);  in ixgbevf_process_skb_fields()
    509  ixgbevf_rx_checksum(rx_ring, rx_desc, skb);  in ixgbevf_process_skb_fields()
    513  unsigned long *active_vlans = netdev_priv(rx_ring->netdev);  in ixgbevf_process_skb_fields()
    519  skb->protocol = eth_type_trans(skb, rx_ring->netdev);  in ixgbevf_process_skb_fields()
    523  struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,  in ixgbevf_get_rx_buffer() argument
    528  rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];  in ixgbevf_get_rx_buffer()
    532  dma_sync_single_range_for_cpu(rx_ring->dev,  in ixgbevf_get_rx_buffer()
    543  static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,  in ixgbevf_put_rx_buffer() argument
    [all …]
|
D | ethtool.c |
    248  struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;  in ixgbevf_set_ringparam() local
    277  adapter->rx_ring[i]->count = new_rx_count;  in ixgbevf_set_ringparam()
    331  rx_ring = vmalloc(array_size(sizeof(*rx_ring),  in ixgbevf_set_ringparam()
    333  if (!rx_ring) {  in ixgbevf_set_ringparam()
    340  rx_ring[i] = *adapter->rx_ring[i];  in ixgbevf_set_ringparam()
    343  memset(&rx_ring[i].xdp_rxq, 0,  in ixgbevf_set_ringparam()
    344  sizeof(rx_ring[i].xdp_rxq));  in ixgbevf_set_ringparam()
    346  rx_ring[i].count = new_rx_count;  in ixgbevf_set_ringparam()
    347  err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);  in ixgbevf_set_ringparam()
    351  ixgbevf_free_rx_resources(&rx_ring[i]);  in ixgbevf_set_ringparam()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/oki-semi/pch_gbe/ |
D | pch_gbe_main.c |
    593  adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,  in pch_gbe_alloc_queues()
    594  sizeof(*adapter->rx_ring), GFP_KERNEL);  in pch_gbe_alloc_queues()
    595  if (!adapter->rx_ring)  in pch_gbe_alloc_queues()
    869  (unsigned long long)adapter->rx_ring->dma,  in pch_gbe_configure_rx()
    870  adapter->rx_ring->size);  in pch_gbe_configure_rx()
    888  rdba = adapter->rx_ring->dma;  in pch_gbe_configure_rx()
    889  rdlen = adapter->rx_ring->size - 0x10;  in pch_gbe_configure_rx()
    973  struct pch_gbe_rx_ring *rx_ring)  in pch_gbe_clean_rx_ring() argument
    981  for (i = 0; i < rx_ring->count; i++) {  in pch_gbe_clean_rx_ring()
    982  buffer_info = &rx_ring->buffer_info[i];  in pch_gbe_clean_rx_ring()
    [all …]
|
/Linux-v4.19/drivers/net/wireless/ath/ath10k/ |
D | htt_rx.c |
    42  hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)  in ath10k_htt_rx_find_skb_paddr()
    57  if (htt->rx_ring.in_ord_rx) {  in ath10k_htt_rx_ring_free()
    58  hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {  in ath10k_htt_rx_ring_free()
    67  for (i = 0; i < htt->rx_ring.size; i++) {  in ath10k_htt_rx_ring_free()
    68  skb = htt->rx_ring.netbufs_ring[i];  in ath10k_htt_rx_ring_free()
    80  htt->rx_ring.fill_cnt = 0;  in ath10k_htt_rx_ring_free()
    81  hash_init(htt->rx_ring.skb_table);  in ath10k_htt_rx_ring_free()
    82  memset(htt->rx_ring.netbufs_ring, 0,  in ath10k_htt_rx_ring_free()
    83  htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));  in ath10k_htt_rx_ring_free()
    88  return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);  in ath10k_htt_get_rx_ring_size_32()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/intel/e1000e/ |
D | netdev.c |
    208  struct e1000_ring *rx_ring = adapter->rx_ring;  in e1000e_dump() local
    324  0, rx_ring->next_to_use, rx_ring->next_to_clean);  in e1000e_dump()
    360  for (i = 0; i < rx_ring->count; i++) {  in e1000e_dump()
    362  buffer_info = &rx_ring->buffer_info[i];  in e1000e_dump()
    363  rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);  in e1000e_dump()
    368  if (i == rx_ring->next_to_use)  in e1000e_dump()
    370  else if (i == rx_ring->next_to_clean)  in e1000e_dump()
    426  for (i = 0; i < rx_ring->count; i++) {  in e1000e_dump()
    429  buffer_info = &rx_ring->buffer_info[i];  in e1000e_dump()
    430  rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);  in e1000e_dump()
    [all …]
|
D | ethtool.c |
    694   adapter->rx_ring->count = new_rx_count;  in e1000_set_ringparam()
    735   memcpy(temp_rx, adapter->rx_ring, size);  in e1000_set_ringparam()
    749   e1000e_free_rx_resources(adapter->rx_ring);  in e1000_set_ringparam()
    750   memcpy(adapter->rx_ring, temp_rx, size);  in e1000_set_ringparam()
    1115  struct e1000_ring *rx_ring = &adapter->test_rx_ring;  in e1000_free_desc_rings() local
    1134  if (rx_ring->desc && rx_ring->buffer_info) {  in e1000_free_desc_rings()
    1135  for (i = 0; i < rx_ring->count; i++) {  in e1000_free_desc_rings()
    1136  buffer_info = &rx_ring->buffer_info[i];  in e1000_free_desc_rings()
    1152  if (rx_ring->desc) {  in e1000_free_desc_rings()
    1153  dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,  in e1000_free_desc_rings()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/agere/ |
D | et131x.c |
    288   struct rx_ring {  struct
    492   struct rx_ring rx_ring;  member
    740   struct rx_ring *rx_ring = &adapter->rx_ring;  in et131x_rx_dma_enable() local
    742   if (rx_ring->fbr[1]->buffsize == 4096)  in et131x_rx_dma_enable()
    744   else if (rx_ring->fbr[1]->buffsize == 8192)  in et131x_rx_dma_enable()
    746   else if (rx_ring->fbr[1]->buffsize == 16384)  in et131x_rx_dma_enable()
    750   if (rx_ring->fbr[0]->buffsize == 256)  in et131x_rx_dma_enable()
    752   else if (rx_ring->fbr[0]->buffsize == 512)  in et131x_rx_dma_enable()
    754   else if (rx_ring->fbr[0]->buffsize == 1024)  in et131x_rx_dma_enable()
    1543  struct rx_ring *rx_local = &adapter->rx_ring;  in et131x_config_rx_dma_regs()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_main.c |
    571  struct ixgbe_ring *rx_ring;  in ixgbe_dump() local
    710  rx_ring = adapter->rx_ring[n];  in ixgbe_dump()
    712  n, rx_ring->next_to_use, rx_ring->next_to_clean);  in ixgbe_dump()
    767  rx_ring = adapter->rx_ring[n];  in ixgbe_dump()
    769  pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);  in ixgbe_dump()
    780  for (i = 0; i < rx_ring->count; i++) {  in ixgbe_dump()
    783  if (i == rx_ring->next_to_use)  in ixgbe_dump()
    785  else if (i == rx_ring->next_to_clean)  in ixgbe_dump()
    790  rx_buffer_info = &rx_ring->rx_buffer_info[i];  in ixgbe_dump()
    791  rx_desc = IXGBE_RX_DESC(rx_ring, i);  in ixgbe_dump()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/intel/igb/ |
D | igb_main.c |
    366  struct igb_ring *rx_ring;  in igb_dump() local
    471  rx_ring = adapter->rx_ring[n];  in igb_dump()
    473  n, rx_ring->next_to_use, rx_ring->next_to_clean);  in igb_dump()
    504  rx_ring = adapter->rx_ring[n];  in igb_dump()
    506  pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);  in igb_dump()
    511  for (i = 0; i < rx_ring->count; i++) {  in igb_dump()
    514  buffer_info = &rx_ring->rx_buffer_info[i];  in igb_dump()
    515  rx_desc = IGB_RX_DESC(rx_ring, i);  in igb_dump()
    519  if (i == rx_ring->next_to_use)  in igb_dump()
    521  else if (i == rx_ring->next_to_clean)  in igb_dump()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/intel/e1000/ |
D | e1000_main.c |
    83    struct e1000_rx_ring *rx_ring);
    102   struct e1000_rx_ring *rx_ring);
    116   struct e1000_rx_ring *rx_ring,
    119   struct e1000_rx_ring *rx_ring,
    122   struct e1000_rx_ring *rx_ring,  in e1000_alloc_dummy_rx_buffers() argument
    127   struct e1000_rx_ring *rx_ring,
    130   struct e1000_rx_ring *rx_ring,
    382   struct e1000_rx_ring *ring = &adapter->rx_ring[i];  in e1000_configure()
    1231  kfree(adapter->rx_ring);  in e1000_probe()
    1272  kfree(adapter->rx_ring);  in e1000_remove()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/intel/ixgb/ |
D | ixgb_main.c |
    193  ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));  in ixgb_up()
    752  struct ixgb_desc_ring *rxdr = &adapter->rx_ring;  in ixgb_setup_rx_resources()
    823  u64 rdba = adapter->rx_ring.dma;  in ixgb_configure_rx()
    824  u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);  in ixgb_configure_rx()
    959  struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;  in ixgb_free_rx_resources() local
    964  vfree(rx_ring->buffer_info);  in ixgb_free_rx_resources()
    965  rx_ring->buffer_info = NULL;  in ixgb_free_rx_resources()
    967  dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,  in ixgb_free_rx_resources()
    968  rx_ring->dma);  in ixgb_free_rx_resources()
    970  rx_ring->desc = NULL;  in ixgb_free_rx_resources()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/apm/xgene/ |
D | xgene_enet_main.c |
    230  struct xgene_enet_desc_ring *rx_ring = data;  in xgene_enet_rx_irq() local
    232  if (napi_schedule_prep(&rx_ring->napi)) {  in xgene_enet_rx_irq()
    234  __napi_schedule(&rx_ring->napi);  in xgene_enet_rx_irq()
    682  static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,  in xgene_enet_rx_frame() argument
    700  ndev = rx_ring->ndev;  in xgene_enet_rx_frame()
    702  dev = ndev_to_dev(rx_ring->ndev);  in xgene_enet_rx_frame()
    703  buf_pool = rx_ring->buf_pool;  in xgene_enet_rx_frame()
    704  page_pool = rx_ring->page_pool;  in xgene_enet_rx_frame()
    728  xgene_enet_parse_error(rx_ring, status);  in xgene_enet_rx_frame()
    729  rx_ring->rx_dropped++;  in xgene_enet_rx_frame()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/packetengines/ |
D | yellowfin.c |
    309   struct yellowfin_desc *rx_ring;  member
    446   np->rx_ring = ring_space;  in yellowfin_init_one()
    511   pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);  in yellowfin_init_one()
    693   pr_warn(" Rx ring %p: ", yp->rx_ring);  in yellowfin_tx_timeout()
    695   pr_cont(" %08x", yp->rx_ring[i].result_status);  in yellowfin_tx_timeout()
    731   yp->rx_ring[i].dbdma_cmd =  in yellowfin_init_ring()
    733   yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +  in yellowfin_init_ring()
    743   yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,  in yellowfin_init_ring()
    751   yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);  in yellowfin_init_ring()
    1041  entry, yp->rx_ring[entry].result_status);  in yellowfin_rx()
    [all …]
|
/Linux-v4.19/drivers/net/ethernet/amd/ |
D | lance.c |
    231  u32 rx_ring; /* Tx and Rx ring base pointers */  member
    237  struct lance_rx_head rx_ring[RX_RING_SIZE];  member
    574  lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;  in lance_probe1()
    789  (u32) isa_virt_to_bus(lp->rx_ring),  in lance_open()
    846  lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */  in lance_purge_ring()
    880  lp->rx_ring[i].base = 0;  in lance_init_ring()
    882  lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;  in lance_init_ring()
    883  lp->rx_ring[i].buf_length = -PKT_BUF_SZ;  in lance_init_ring()
    897  lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;  in lance_init_ring()
    934  lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,  in lance_tx_timeout()
    [all …]
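
The two init_block.rx_ring assignments above pack a 24-bit ISA bus address together with RX_RING_LEN_BITS into one 32-bit word. Only the "& 0xffffff" and the OR are visible in these hits; the shift-by-29 encoding of the log2 ring size in the sketch below is an assumption based on the classic LANCE init-block layout, not something shown in these lines:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: low 24 bits carry the bus address of the ring,
     * the top three bits carry log2 of the ring length. */
    #define RX_LOG_RING_SIZE 5                      /* 2^5 = 32 descriptors */
    #define RX_RING_LEN_BITS ((uint32_t)RX_LOG_RING_SIZE << 29)

    static uint32_t pack_ring_pointer(uint32_t bus_addr)
    {
        return (bus_addr & 0xffffff) | RX_RING_LEN_BITS;
    }

    int main(void)
    {
        uint32_t bus_addr = 0x0012f000;             /* pretend bus address of rx_ring */
        uint32_t field = pack_ring_pointer(bus_addr);

        printf("packed field = 0x%08x (addr=0x%06x, log2 len=%u)\n",
               (unsigned)field, (unsigned)(field & 0xffffff), (unsigned)(field >> 29));
        return 0;
    }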
|