
Searched refs:tx_rings (Results 1 – 25 of 36) sorted by relevance


/Linux-v5.4/drivers/net/wireless/realtek/rtw88/
pci.c
168 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_free_trx_ring()
324 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_init_trx_ring()
346 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_init_trx_ring()
398 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma; in rtw_pci_reset_buf_desc()
401 len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len; in rtw_pci_reset_buf_desc()
402 dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma; in rtw_pci_reset_buf_desc()
403 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0; in rtw_pci_reset_buf_desc()
404 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0; in rtw_pci_reset_buf_desc()
408 len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len; in rtw_pci_reset_buf_desc()
409 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma; in rtw_pci_reset_buf_desc()
[all …]
pci.h
191 struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM]; member
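
The rtw88 hits above treat tx_rings as a fixed-size table indexed by a Tx-queue enum, with per-ring DMA address, length, and read/write pointers (the H2C queue's rp/wp are zeroed in rtw_pci_reset_buf_desc()). A compilable user-space sketch of that shape; the queue names, field widths, and TXQ_NUM count here are hypothetical, not the driver's real definitions:

#include <stdint.h>
#include <stdio.h>

enum tx_queue { TXQ_BK, TXQ_BE, TXQ_VI, TXQ_VO, TXQ_BCN, TXQ_H2C, TXQ_NUM };

struct tx_ring {
	uint64_t dma;   /* DMA base address of the descriptor ring */
	uint32_t len;   /* number of descriptors */
	uint32_t rp;    /* read pointer */
	uint32_t wp;    /* write pointer */
};

struct pci_priv {
	struct tx_ring tx_rings[TXQ_NUM];   /* one ring per hardware queue */
};

/* clear every queue's pointers, mirroring what rtw_pci_reset_buf_desc() does queue by queue */
static void reset_buf_desc(struct pci_priv *p)
{
	for (int q = 0; q < TXQ_NUM; q++) {
		p->tx_rings[q].rp = 0;
		p->tx_rings[q].wp = 0;
	}
}

int main(void)
{
	struct pci_priv priv = { 0 };

	priv.tx_rings[TXQ_H2C].len = 256;
	reset_buf_desc(&priv);
	printf("H2C ring: len=%u rp=%u wp=%u\n",
	       (unsigned)priv.tx_rings[TXQ_H2C].len,
	       (unsigned)priv.tx_rings[TXQ_H2C].rp,
	       (unsigned)priv.tx_rings[TXQ_H2C].wp);
	return 0;
}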
/Linux-v5.4/drivers/net/ethernet/aquantia/atlantic/
aq_vec.c
22 unsigned int tx_rings; member
46 self->tx_rings > i; ++i, ring = self->ring[i]) { in aq_vec_poll()
125 self->tx_rings = 0; in aq_vec_alloc()
133 self->tx_rings, in aq_vec_alloc()
143 ++self->tx_rings; in aq_vec_alloc()
176 self->tx_rings > i; ++i, ring = self->ring[i]) { in aq_vec_init()
218 self->tx_rings > i; ++i, ring = self->ring[i]) { in aq_vec_start()
242 self->tx_rings > i; ++i, ring = self->ring[i]) { in aq_vec_stop()
262 self->tx_rings > i; ++i, ring = self->ring[i]) { in aq_vec_deinit()
278 self->tx_rings > i; ++i, ring = self->ring[i]) { in aq_vec_free()
[all …]
aq_hw.h
51 u8 tx_rings; member
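
In the atlantic hits, tx_rings is not ring storage at all but a per-vector count: aq_vec_alloc() starts it at zero and increments it for each ring set up, and every service loop uses it as the bound (for (i = 0; self->tx_rings > i; ++i)). A minimal sketch of that counting idiom, assuming a plain fixed array stands in for the driver's ring slots:

#include <stdio.h>

#define MAX_RINGS_PER_VEC 8

struct ring { int idx; };

struct vec {
	unsigned int tx_rings;              /* how many entries of ring[] are valid */
	struct ring ring[MAX_RINGS_PER_VEC];
};

static void vec_alloc(struct vec *self, unsigned int want)
{
	self->tx_rings = 0;
	for (unsigned int i = 0; i < want && i < MAX_RINGS_PER_VEC; i++) {
		self->ring[i].idx = (int)i;
		++self->tx_rings;           /* count grows only for rings that exist */
	}
}

static void vec_poll(struct vec *self)
{
	/* same bound style as aq_vec_poll(): iterate only the rings actually allocated */
	for (unsigned int i = 0; self->tx_rings > i; ++i)
		printf("servicing tx ring %d\n", self->ring[i].idx);
}

int main(void)
{
	struct vec v;

	vec_alloc(&v, 3);
	vec_poll(&v);
	return 0;
}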
/Linux-v5.4/drivers/net/ethernet/intel/ice/
ice_ethtool.c
663 tx_ring = test_vsi->tx_rings[0]; in ice_loopback_test()
1320 ring = READ_ONCE(vsi->tx_rings[j]); in ice_get_ethtool_stats()
2566 ring->tx_pending = vsi->tx_rings[0]->count; in ice_get_ringparam()
2578 struct ice_ring *tx_rings = NULL, *rx_rings = NULL; in ice_set_ringparam() local
2608 if (new_tx_cnt == vsi->tx_rings[0]->count && in ice_set_ringparam()
2624 vsi->tx_rings[i]->count = new_tx_cnt; in ice_set_ringparam()
2631 if (new_tx_cnt == vsi->tx_rings[0]->count) in ice_set_ringparam()
2636 vsi->tx_rings[0]->count, new_tx_cnt); in ice_set_ringparam()
2638 tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, in ice_set_ringparam()
2639 sizeof(*tx_rings), GFP_KERNEL); in ice_set_ringparam()
[all …]
ice_lib.c
263 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, in ice_vsi_alloc_arrays()
264 sizeof(*vsi->tx_rings), GFP_KERNEL); in ice_vsi_alloc_arrays()
265 if (!vsi->tx_rings) in ice_vsi_alloc_arrays()
304 devm_kfree(&pf->pdev->dev, vsi->tx_rings); in ice_vsi_alloc_arrays()
455 if (vsi->tx_rings) { in ice_vsi_free_arrays()
456 devm_kfree(&pf->pdev->dev, vsi->tx_rings); in ice_vsi_free_arrays()
457 vsi->tx_rings = NULL; in ice_vsi_free_arrays()
1271 if (vsi->tx_rings) { in ice_vsi_clear_rings()
1273 if (vsi->tx_rings[i]) { in ice_vsi_clear_rings()
1274 kfree_rcu(vsi->tx_rings[i], rcu); in ice_vsi_clear_rings()
[all …]
ice_dcb_lib.c
115 tx_ring = vsi->tx_rings[i]; in ice_vsi_cfg_dcb_rings()
132 tx_ring = vsi->tx_rings[i]; in ice_vsi_cfg_dcb_rings()
ice_main.c
94 struct ice_ring *tx_ring = vsi->tx_rings[i]; in ice_check_for_hang_subtask()
3639 ring = READ_ONCE(vsi->tx_rings[i]); in ice_update_vsi_ring_stats()
3941 ice_clean_tx_ring(vsi->tx_rings[i]); in ice_down()
3973 vsi->tx_rings[i]->netdev = vsi->netdev; in ice_vsi_setup_tx_rings()
3974 err = ice_setup_tx_ring(vsi->tx_rings[i]); in ice_vsi_setup_tx_rings()
4661 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_tx_timeout()
4662 if (hung_queue == vsi->tx_rings[i]->q_index) { in ice_tx_timeout()
4663 tx_ring = vsi->tx_rings[i]; in ice_tx_timeout()
ice.h
217 struct ice_ring **tx_rings; /* Tx ring array */ member
ice_virtchnl_pf.c
2050 struct ice_ring *ring = vsi->tx_rings[vf_q_id]; in ice_vc_dis_qs_msg()
2207 vsi->tx_rings[vsi_q_id]->q_vector = q_vector; in ice_vc_cfg_irq_map_msg()
2277 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; in ice_vc_cfg_qs_msg()
2278 vsi->tx_rings[i]->count = qpi->txq.ring_len; in ice_vc_cfg_qs_msg()
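
In the ice hits, tx_rings is a devm_kcalloc'd array of ring pointers hung off the VSI, sized by alloc_txq, NULL-checked after allocation, freed per ring with kfree_rcu(), and reset to NULL once the array itself is released. A user-space sketch of that allocate/check/free shape, with calloc and free standing in for devm_kcalloc/devm_kfree and kfree_rcu (all type names are placeholders):

#include <stdlib.h>
#include <stdio.h>

struct ring { unsigned int count; };

struct vsi {
	unsigned int alloc_txq;  /* how many Tx queues this VSI may use */
	struct ring **tx_rings;  /* array of per-queue ring pointers */
};

static int vsi_alloc_arrays(struct vsi *v)
{
	v->tx_rings = calloc(v->alloc_txq, sizeof(*v->tx_rings));
	if (!v->tx_rings)
		return -1;               /* kernel code returns -ENOMEM here */
	return 0;
}

static void vsi_free_arrays(struct vsi *v)
{
	if (v->tx_rings) {
		for (unsigned int i = 0; i < v->alloc_txq; i++)
			free(v->tx_rings[i]);  /* the driver uses kfree_rcu() per ring */
		free(v->tx_rings);
		v->tx_rings = NULL;            /* matches ice_vsi_free_arrays() */
	}
}

int main(void)
{
	struct vsi v = { .alloc_txq = 4 };

	if (vsi_alloc_arrays(&v))
		return 1;
	v.tx_rings[0] = calloc(1, sizeof(struct ring));
	if (v.tx_rings[0])
		v.tx_rings[0]->count = 512;
	printf("ring 0 count = %u\n", v.tx_rings[0] ? v.tx_rings[0]->count : 0);
	vsi_free_arrays(&v);
	return 0;
}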
/Linux-v5.4/drivers/net/ethernet/intel/i40e/
i40e_ethtool.c
1919 ring->tx_pending = vsi->tx_rings[0]->count; in i40e_get_ringparam()
1938 struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; in i40e_set_ringparam() local
1966 if ((new_tx_count == vsi->tx_rings[0]->count) && in i40e_set_ringparam()
1987 vsi->tx_rings[i]->count = new_tx_count; in i40e_set_ringparam()
2005 if (new_tx_count != vsi->tx_rings[0]->count) { in i40e_set_ringparam()
2008 vsi->tx_rings[0]->count, new_tx_count); in i40e_set_ringparam()
2009 tx_rings = kcalloc(tx_alloc_queue_pairs, in i40e_set_ringparam()
2011 if (!tx_rings) { in i40e_set_ringparam()
2020 tx_rings[i] = *vsi->tx_rings[i]; in i40e_set_ringparam()
2021 tx_rings[i].count = new_tx_count; in i40e_set_ringparam()
[all …]
i40e_main.c
335 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { in i40e_tx_timeout()
337 vsi->tx_rings[i]->queue_index) { in i40e_tx_timeout()
338 tx_ring = vsi->tx_rings[i]; in i40e_tx_timeout()
447 if (!vsi->tx_rings) in i40e_get_netdev_stats_struct()
455 ring = READ_ONCE(vsi->tx_rings[i]); in i40e_get_netdev_stats_struct()
511 memset(&vsi->tx_rings[i]->stats, 0, in i40e_vsi_reset_stats()
512 sizeof(vsi->tx_rings[i]->stats)); in i40e_vsi_reset_stats()
513 memset(&vsi->tx_rings[i]->tx_stats, 0, in i40e_vsi_reset_stats()
514 sizeof(vsi->tx_rings[i]->tx_stats)); in i40e_vsi_reset_stats()
808 p = READ_ONCE(vsi->tx_rings[q]); in i40e_update_vsi_stats()
[all …]
i40e_debugfs.c
285 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); in i40e_dbg_dump_vsi_seid()
512 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) { in i40e_dbg_dump_desc()
520 ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], in i40e_dbg_dump_desc()
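
i40e_set_ringparam() above stages a ring resize by copying each live ring into a scratch tx_rings array and overwriting only the descriptor count before anything is committed. A short sketch of that copy-then-resize step under the same assumption of placeholder types (the commit and error unwinding are left out):

#include <stdlib.h>
#include <stdio.h>

struct ring { unsigned int count; };

struct vsi {
	unsigned int num_queues;
	struct ring **tx_rings;   /* live rings, one pointer per queue */
};

static struct ring *clone_rings_with_count(const struct vsi *v, unsigned int new_count)
{
	struct ring *scratch = calloc(v->num_queues, sizeof(*scratch));

	if (!scratch)
		return NULL;
	for (unsigned int i = 0; i < v->num_queues; i++) {
		scratch[i] = *v->tx_rings[i];   /* copy current settings */
		scratch[i].count = new_count;   /* apply the new ring size */
	}
	return scratch;
}

int main(void)
{
	struct ring r0 = { .count = 512 }, r1 = { .count = 512 };
	struct ring *ptrs[] = { &r0, &r1 };
	struct vsi v = { .num_queues = 2, .tx_rings = ptrs };
	struct ring *scratch = clone_rings_with_count(&v, 1024);

	if (scratch)
		printf("queue 0 resized from %u to %u descriptors\n",
		       v.tx_rings[0]->count, scratch[0].count);
	free(scratch);
	return 0;
}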
/Linux-v5.4/drivers/thunderbolt/
nhi.c
447 if (!nhi->tx_rings[i]) { in nhi_alloc_hop()
465 if (ring->is_tx && nhi->tx_rings[ring->hop]) { in nhi_alloc_hop()
478 nhi->tx_rings[ring->hop] = ring; in nhi_alloc_hop()
716 ring->nhi->tx_rings[ring->hop] = NULL; in tb_ring_free()
838 ring = nhi->tx_rings[hop]; in nhi_interrupt_work()
1020 if (nhi->tx_rings[i]) in nhi_shutdown()
1130 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, in nhi_probe()
1131 sizeof(*nhi->tx_rings), GFP_KERNEL); in nhi_probe()
1134 if (!nhi->tx_rings || !nhi->rx_rings) in nhi_probe()
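
For the Thunderbolt NHI, tx_rings is a hop-indexed table of ring pointers allocated at probe time: a hop is free while its slot is NULL, claimed by storing the ring there in nhi_alloc_hop(), and released by writing NULL back in tb_ring_free(). A small single-threaded sketch of that slot table (no locking, hypothetical types):

#include <stdlib.h>
#include <stdio.h>

struct ring { int hop; };

struct nhi {
	unsigned int hop_count;
	struct ring **tx_rings;   /* one slot per hop; NULL means unused */
};

static int alloc_hop(struct nhi *nhi, struct ring *ring)
{
	for (unsigned int i = 0; i < nhi->hop_count; i++) {
		if (!nhi->tx_rings[i]) {       /* first free slot wins */
			ring->hop = (int)i;
			nhi->tx_rings[i] = ring;
			return 0;
		}
	}
	return -1;                             /* all hops busy */
}

static void ring_free(struct nhi *nhi, struct ring *ring)
{
	nhi->tx_rings[ring->hop] = NULL;       /* as tb_ring_free() does */
}

int main(void)
{
	struct nhi nhi = { .hop_count = 4 };
	struct ring r;

	nhi.tx_rings = calloc(nhi.hop_count, sizeof(*nhi.tx_rings));
	if (!nhi.tx_rings)
		return 1;
	if (!alloc_hop(&nhi, &r))
		printf("ring claimed hop %d\n", r.hop);
	ring_free(&nhi, &r);
	free(nhi.tx_rings);
	return 0;
}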
/Linux-v5.4/drivers/net/ethernet/broadcom/genet/
bcmgenet.c
755 tx_rings[num].packets), \
757 tx_rings[num].bytes), \
1474 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); in bcmgenet_tx_reclaim_all()
1477 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); in bcmgenet_tx_reclaim_all()
1572 ring = &priv->tx_rings[index]; in bcmgenet_xmit()
2116 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; in bcmgenet_init_tx_ring()
2236 ring = &priv->tx_rings[i]; in bcmgenet_enable_tx_napi()
2241 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_enable_tx_napi()
2252 ring = &priv->tx_rings[i]; in bcmgenet_disable_tx_napi()
2256 ring = &priv->tx_rings[DESC_INDEX]; in bcmgenet_disable_tx_napi()
[all …]
bcmgenet.h
618 struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; member
/Linux-v5.4/drivers/net/ethernet/broadcom/
bcmsysport.c
437 ring = &priv->tx_rings[q]; in bcm_sysport_update_tx_stats()
503 ring = &priv->tx_rings[i]; in bcm_sysport_get_stats()
632 bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec); in bcm_sysport_set_coalesce()
987 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]); in bcm_sysport_tx_reclaim_all()
1141 txr = &priv->tx_rings[ring]; in bcm_sysport_rx_isr()
1173 txr = &priv->tx_rings[ring]; in bcm_sysport_tx_isr()
1290 ring = &priv->tx_rings[queue]; in bcm_sysport_xmit()
1467 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; in bcm_sysport_init_tx_ring()
1552 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; in bcm_sysport_fini_tx_ring()
2315 ring = &priv->tx_rings[q]; in bcm_sysport_map_queues()
[all …]
bcmsysport.h
745 struct bcm_sysport_tx_ring *tx_rings; member
/Linux-v5.4/drivers/net/ethernet/netronome/nfp/
nfp_net_common.c
993 tx_ring = &dp->tx_rings[qidx]; in nfp_net_tx()
2448 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), in nfp_net_tx_rings_prepare()
2450 if (!dp->tx_rings) in nfp_net_tx_rings_prepare()
2459 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], in nfp_net_tx_rings_prepare()
2462 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
2465 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
2473 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
2475 nfp_net_tx_ring_free(&dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
2477 kfree(dp->tx_rings); in nfp_net_tx_rings_prepare()
2486 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_free()
[all …]
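
nfp_net_tx_rings_prepare() above allocates the whole tx_rings array up front, initializes each ring in turn, and on a mid-loop failure tears down only the rings already prepared before freeing the array. A user-space sketch of that forward-setup/backward-unwind pattern; the forced failure on ring 2 is only there to exercise the unwind path, and all names are placeholders:

#include <stdlib.h>
#include <stdio.h>

struct ring { void *bufs; };

static int ring_setup(struct ring *r, unsigned int idx)
{
	if (idx == 2)                  /* simulated allocation failure */
		return -1;
	r->bufs = malloc(64);
	return r->bufs ? 0 : -1;
}

static void ring_teardown(struct ring *r)
{
	free(r->bufs);
	r->bufs = NULL;
}

static struct ring *rings_prepare(unsigned int num)
{
	struct ring *rings = calloc(num, sizeof(*rings));
	unsigned int r;

	if (!rings)
		return NULL;
	for (r = 0; r < num; r++) {
		if (ring_setup(&rings[r], r))
			goto err_unwind;
	}
	return rings;

err_unwind:
	while (r--)                    /* free only the rings already set up */
		ring_teardown(&rings[r]);
	free(rings);
	return NULL;
}

int main(void)
{
	struct ring *rings = rings_prepare(4);

	printf("prepare %s\n", rings ? "succeeded" : "failed and unwound cleanly");
	free(rings);
	return 0;
}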
/Linux-v5.4/drivers/net/ethernet/intel/iavf/
iavf_ethtool.c
354 &adapter->tx_rings[i] : NULL); in iavf_get_ethtool_stats()
660 tx_ring = &adapter->tx_rings[queue]; in __iavf_get_coalesce()
716 struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; in iavf_set_itr_per_queue()
iavf_main.c
331 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; in iavf_map_vector_to_txq()
560 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); in iavf_configure_tx()
1081 kfree(adapter->tx_rings); in iavf_free_queues()
1082 adapter->tx_rings = NULL; in iavf_free_queues()
1115 adapter->tx_rings = kcalloc(num_active_queues, in iavf_alloc_queues()
1117 if (!adapter->tx_rings) in iavf_alloc_queues()
1128 tx_ring = &adapter->tx_rings[i]; in iavf_alloc_queues()
2392 if (!adapter->tx_rings) in iavf_free_all_tx_resources()
2396 if (adapter->tx_rings[i].desc) in iavf_free_all_tx_resources()
2397 iavf_free_tx_resources(&adapter->tx_rings[i]); in iavf_free_all_tx_resources()
[all …]
iavf.h
235 struct iavf_ring *tx_rings; member
/Linux-v5.4/drivers/net/ethernet/sun/
niu.c
3591 index = (rp - np->tx_rings); in niu_tx_work()
3738 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_poll_core()
4092 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_slowpath_interrupt()
4170 struct tx_ring_info *rp = &np->tx_rings[i]; in __niu_fastpath_interrupt()
4310 if (np->tx_rings) { in niu_free_channels()
4312 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_free_channels()
4316 kfree(np->tx_rings); in niu_free_channels()
4317 np->tx_rings = NULL; in niu_free_channels()
4458 struct tx_ring_info *tx_rings; in niu_alloc_channels() local
4511 tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), in niu_alloc_channels()
[all …]
/Linux-v5.4/include/linux/
thunderbolt.h
448 struct tb_ring **tx_rings; member
/Linux-v5.4/drivers/net/ethernet/broadcom/bnxt/
bnxt.c
5698 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) in __bnxt_hwrm_get_tx_rings() argument
5711 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); in __bnxt_hwrm_get_tx_rings()
5720 int tx_rings, int rx_rings, int ring_grps, in __bnxt_hwrm_reserve_pf_rings() argument
5727 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; in __bnxt_hwrm_reserve_pf_rings()
5728 req->num_tx_rings = cpu_to_le16(tx_rings); in __bnxt_hwrm_reserve_pf_rings()
5734 enables |= tx_rings + ring_grps ? in __bnxt_hwrm_reserve_pf_rings()
5749 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); in __bnxt_hwrm_reserve_pf_rings()
5770 struct hwrm_func_vf_cfg_input *req, int tx_rings, in __bnxt_hwrm_reserve_vf_rings() argument
5777 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; in __bnxt_hwrm_reserve_vf_rings()
5782 enables |= tx_rings + ring_grps ? in __bnxt_hwrm_reserve_vf_rings()
[all …]
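
In the bnxt hits, tx_rings is a plain count packed into an HWRM resource-reservation request, and the matching bit in the request's enables mask is set only when the count is non-zero. A tiny sketch of that enables-mask idiom with made-up flag values and field layout (the real driver also converts the counts with cpu_to_le16()):

#include <stdint.h>
#include <stdio.h>

#define ENABLES_NUM_TX_RINGS  (1u << 0)   /* hypothetical flag values */
#define ENABLES_NUM_RX_RINGS  (1u << 1)

struct reserve_req {
	uint32_t enables;       /* which fields below the firmware should honor */
	uint16_t num_tx_rings;
	uint16_t num_rx_rings;
};

static void build_reserve_req(struct reserve_req *req, int tx_rings, int rx_rings)
{
	req->enables = 0;
	req->enables |= tx_rings ? ENABLES_NUM_TX_RINGS : 0;
	req->enables |= rx_rings ? ENABLES_NUM_RX_RINGS : 0;
	req->num_tx_rings = (uint16_t)tx_rings;
	req->num_rx_rings = (uint16_t)rx_rings;
}

int main(void)
{
	struct reserve_req req;

	build_reserve_req(&req, 8, 0);
	printf("enables=0x%x tx=%u rx=%u\n",
	       (unsigned)req.enables, req.num_tx_rings, req.num_rx_rings);
	return 0;
}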
