Lines matching defs:txr (struct bnxt_tx_ring_info *txr, drivers/net/ethernet/broadcom/bnxt/bnxt.c)
357 struct bnxt_tx_ring_info *txr;
367 txr = &bp->tx_ring[bp->tx_ring_map[i]];
368 prod = txr->tx_prod;
370 free_size = bnxt_tx_avail(bp, txr);
380 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
384 tx_buf = &txr->tx_buf_ring[prod];
401 struct tx_push_buffer *tx_push_buf = txr->tx_push;
404 void __iomem *db = txr->tx_db.doorbell;
447 txbd->tx_bd_haddr = txr->data_mapping;
449 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
454 txr->tx_prod = prod;
499 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
542 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
551 tx_buf = &txr->tx_buf_ring[prod];
571 txr->tx_prod = prod;
574 bnxt_db_write(bp, &txr->tx_db, prod);
578 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
580 bnxt_db_write(bp, &txr->tx_db, prod);
590 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
599 prod = txr->tx_prod;
600 tx_buf = &txr->tx_buf_ring[prod];
609 tx_buf = &txr->tx_buf_ring[prod];
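Lines 357-609 above are from bnxt_start_xmit(): the skb's queue number is mapped to a TX ring via tx_ring_map, free descriptors are counted with bnxt_tx_avail(), buffer descriptors are filled at tx_desc_ring[TX_RING(prod)][TX_IDX(prod)], and tx_prod is advanced (line 571) before the doorbell write at line 574. The descriptor ring is a set of DMA pages, so one producer value selects both a page and a slot. Below is a minimal user-space sketch of that two-level indexing; the constants mirror TX_DESC_CNT from bnxt.h (4 KB pages, 16-byte tx_bd), and TX_RING() is written as a plain division, equivalent to the mask-and-shift form in the header.

    #include <stdio.h>

    #define BNXT_PAGE_SIZE 4096u                          /* assumes 4 KB pages */
    #define TX_BD_SIZE     16u                            /* sizeof(struct tx_bd) */
    #define TX_DESC_CNT    (BNXT_PAGE_SIZE / TX_BD_SIZE)  /* 256 BDs per page */
    #define TX_RING(x)     ((x) / TX_DESC_CNT)            /* which descriptor page */
    #define TX_IDX(x)      ((x) & (TX_DESC_CNT - 1))      /* slot within the page */

    int main(void)
    {
        unsigned short prod = 700;   /* 16-bit producer index, as in the driver */

        printf("prod=%u -> page %u, entry %u\n",
               (unsigned)prod, (unsigned)TX_RING(prod), (unsigned)TX_IDX(prod));
        return 0;
    }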
621 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
622 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
623 u16 cons = txr->tx_cons;
633 tx_buf = &txr->tx_buf_ring[cons];
649 tx_buf = &txr->tx_buf_ring[cons];
665 txr->tx_cons = cons;
675 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
678 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
679 txr->dev_state != BNXT_DEV_STATE_CLOSING)
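Lines 621-679 are the completion side, bnxt_tx_int(): software buffers are unmapped from tx_cons forward, the consumer is published at line 665, and the netdev queue is rewoken only when bnxt_tx_avail() clears bp->tx_wake_thresh and the ring is not closing (line 679). A sketch of the free-space arithmetic, modeled on bnxt_tx_avail() in bnxt.h: ring size minus the prod/cons span, with a power-of-two mask so 16-bit index wraparound needs no special case.

    #include <stdio.h>

    /* Modeled on bnxt_tx_avail(); tx_ring_mask is assumed to span the
     * page-backed ring as a power of two minus one. */
    static unsigned int tx_avail(unsigned int ring_size, unsigned int ring_mask,
                                 unsigned short prod, unsigned short cons)
    {
        return ring_size - ((unsigned)(unsigned short)(prod - cons) & ring_mask);
    }

    int main(void)
    {
        /* Producer wrapped past zero while the consumer lags: 11 BDs are
         * in flight, so 501 of 512 slots are free. */
        printf("%u free\n", tx_avail(512, 511, 5, 65530));
        return 0;
    }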
2216 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2217 u16 prod = txr->tx_prod;
2222 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
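Lines 2216-2222 are the NAPI poll path ringing the TX doorbell once per batch: descriptor writes during the poll only flag that TX work happened, and bnxt_db_write_relaxed() publishes the final producer in a single MMIO write. A toy model of that batching pattern; all names here are illustrative:

    #include <stdio.h>

    static unsigned int mmio_writes;           /* counts doorbell MMIOs */

    static void db_write(unsigned short prod)  /* stands in for the MMIO write */
    {
        mmio_writes++;
        printf("doorbell: prod=%u\n", (unsigned)prod);
    }

    int main(void)
    {
        unsigned short prod = 0;
        int tx_event = 0;                      /* cf. the poll loop's event bits */

        for (int pkt = 0; pkt < 8; pkt++) {    /* 8 packets queued in one poll */
            prod++;                            /* descriptor written, no MMIO */
            tx_event = 1;
        }
        if (tx_event)                          /* one write covers the batch */
            db_write(prod);

        printf("%u MMIO write(s) for 8 packets\n", mmio_writes);
        return 0;
    }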
2475 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2479 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2521 tx_buf = &txr->tx_buf_ring[ring_idx];
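Lines 2475-2521 are from bnxt_free_tx_skbs(), which drains every software TX buffer at teardown: a head slot owns the skb, and the fragment slots that follow it (holding only DMA mappings) are stepped over in the same walk. A toy model of that walk, with an invented sw_tx_bd stand-in:

    #include <stdio.h>

    #define RING_MASK 511u

    struct sw_tx_bd { int is_head; int nr_frags; };   /* invented stand-in */

    /* A head slot owns the skb; nr_frags fragment slots follow it and are
     * skipped in the same pass, as bnxt_free_tx_skbs() does. */
    static void drain_ring(struct sw_tx_bd *ring, unsigned int n)
    {
        unsigned int j = 0;

        while (j < n) {
            struct sw_tx_bd *tx_buf = &ring[j & RING_MASK];

            if (tx_buf->is_head) {
                printf("slot %u: unmap head + %d frag(s), free skb\n",
                       j, tx_buf->nr_frags);
                j += 1 + tx_buf->nr_frags;     /* step over the frag slots */
            } else {
                j++;                           /* empty slot */
            }
        }
    }

    int main(void)
    {
        struct sw_tx_bd ring[512] = { { 0 } };

        ring[0].is_head = 1;  ring[0].nr_frags = 2;
        ring[3].is_head = 1;  ring[3].nr_frags = 0;
        drain_ring(ring, 8);
        return 0;
    }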
2881 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2884 if (txr->tx_push) {
2886 txr->tx_push, txr->tx_push_mapping);
2887 txr->tx_push = NULL;
2890 ring = &txr->tx_ring_struct;
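Lines 2881-2890 free the per-ring push buffer before the descriptor ring itself: dma_free_coherent() runs under the 'if (txr->tx_push)' guard at line 2884, and the pointer is cleared at line 2887 so the teardown path is idempotent. A user-space model of that guard-free-NULL pattern, with malloc/free standing in for the coherent DMA API:

    #include <stdio.h>
    #include <stdlib.h>

    struct txr_model { void *tx_push; };       /* invented stand-in */

    static void free_tx_push(struct txr_model *txr)
    {
        if (txr->tx_push) {                    /* mirrors the line 2884 guard */
            free(txr->tx_push);                /* dma_free_coherent() in-kernel */
            txr->tx_push = NULL;               /* line 2887: clear the pointer */
        }
    }

    int main(void)
    {
        struct txr_model txr = { malloc(256) };

        free_tx_push(&txr);
        free_tx_push(&txr);                    /* second call is a no-op */
        printf("tx_push=%p\n", txr.tx_push);
        return 0;
    }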
2917 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2921 ring = &txr->tx_ring_struct;
2927 ring->grp_idx = txr->bnapi->index;
2934 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2936 &txr->tx_push_mapping,
2939 if (!txr->tx_push)
2942 mapping = txr->tx_push_mapping +
2944 txr->data_mapping = cpu_to_le64(mapping);
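Lines 2917-2944, from the TX ring allocation path, carve the push buffer out of one coherent allocation: the device is handed the DMA address of the data area, which sits just past the push BD header, stored little-endian in txr->data_mapping (line 2944). A sketch of the offset arithmetic; the 16-byte header size below stands in for sizeof(struct tx_push_bd) and is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define PUSH_BD_HDR 16u   /* illustrative; sizeof(struct tx_push_bd) in-kernel */

    int main(void)
    {
        /* tx_push_mapping would come from dma_alloc_coherent(); the value
         * here is made up for the example. */
        uint64_t tx_push_mapping = 0x100000;
        uint64_t data_mapping = tx_push_mapping + PUSH_BD_HDR;

        /* the driver stores this via cpu_to_le64() so the device reads it
         * correctly regardless of host endianness */
        printf("push BDs at %#llx, push data at %#llx\n",
               (unsigned long long)tx_push_mapping,
               (unsigned long long)data_mapping);
        return 0;
    }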
3080 struct bnxt_tx_ring_info *txr;
3118 txr = bnapi->tx_ring;
3119 if (!txr)
3122 ring = &txr->tx_ring_struct;
3126 rmem->pg_arr = (void **)txr->tx_desc_ring;
3127 rmem->dma_arr = txr->tx_desc_mapping;
3129 rmem->vmem = (void **)&txr->tx_buf_ring;
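Lines 3080-3129 wire the TX ring into the generic ring-memory descriptor: rmem->pg_arr and rmem->dma_arr point at the TX descriptor pages and their DMA addresses, and rmem->vmem tells the allocator where to deposit the software buffer array, so one allocator serves every ring type. A user-space sketch of that indirection, with invented stand-in types and calloc() in place of dma_alloc_coherent():

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Invented stand-in for struct bnxt_ring_mem_info: the allocator sees
     * only these fields, never the ring type behind them. */
    struct ring_mem {
        int nr_pages;
        void **pg_arr;                 /* CPU address of each descriptor page */
        unsigned long long *dma_arr;   /* device address of each page */
        void **vmem;                   /* out-param: software buffer array */
        size_t vmem_size;
    };

    static int alloc_ring(struct ring_mem *rmem)
    {
        for (int i = 0; i < rmem->nr_pages; i++) {
            rmem->pg_arr[i] = calloc(1, 4096);    /* dma_alloc_coherent() */
            if (!rmem->pg_arr[i])
                return -1;
            /* user-space stand-in for the page's real DMA address */
            rmem->dma_arr[i] = (unsigned long long)(uintptr_t)rmem->pg_arr[i];
        }
        if (rmem->vmem_size) {
            *rmem->vmem = calloc(1, rmem->vmem_size);   /* tx_buf_ring */
            if (!*rmem->vmem)
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        void *tx_desc_ring[2];
        unsigned long long tx_desc_mapping[2];
        void *tx_buf_ring = NULL;
        struct ring_mem rmem = {
            .nr_pages  = 2,
            .pg_arr    = tx_desc_ring,       /* cf. line 3126 */
            .dma_arr   = tx_desc_mapping,    /* cf. line 3127 */
            .vmem      = &tx_buf_ring,       /* cf. line 3129 */
            .vmem_size = 512 * sizeof(void *),
        };

        printf("alloc_ring: %d, tx_buf_ring=%p\n", alloc_ring(&rmem), tx_buf_ring);
        return 0;
    }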
3294 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3295 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3852 struct bnxt_tx_ring_info *txr;
3860 txr = bnapi->tx_ring;
3861 if (txr) {
3862 txr->tx_prod = 0;
3863 txr->tx_cons = 0;
4020 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4023 txr->tx_ring_struct.ring_mem.flags =
4025 txr->bnapi = bp->bnapi[j];
4026 bp->bnapi[j]->tx_ring = txr;
4029 txr->txq_index = i - bp->tx_nr_rings_xdp;
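Lines 4020-4029 bind each TX ring to its NAPI container. XDP rings occupy the front of bp->tx_ring and have no netdev queue, so line 4029 offsets the queue index by tx_nr_rings_xdp. A small model of that layout, with illustrative ring counts:

    #include <stdio.h>

    int main(void)
    {
        int tx_nr_rings = 6, tx_nr_rings_xdp = 2;   /* illustrative counts */

        /* Mirrors line 4029: XDP rings fill slots 0..xdp-1 and have no
         * netdev queue; regular rings map to queues 0..n-xdp-1. */
        for (int i = 0; i < tx_nr_rings; i++) {
            if (i < tx_nr_rings_xdp)
                printf("tx_ring[%d]: XDP, no txq\n", i);
            else
                printf("tx_ring[%d]: txq_index %d\n", i, i - tx_nr_rings_xdp);
        }
        return 0;
    }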
4809 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4812 struct bnxt_napi *bnapi = txr->bnapi;
4818 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5236 struct bnxt_tx_ring_info *txr;
5238 txr = container_of(ring, struct bnxt_tx_ring_info,
5243 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5426 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5431 struct bnxt_napi *bnapi = txr->bnapi;
5447 ring = &txr->tx_ring_struct;
5452 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
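Lines 5426-5452 run after firmware accepts a RING_ALLOC request: bnxt_set_db() records the doorbell address and key for the new ring (type, map index, firmware ring id), which the writes at lines 574 and 2222 later reuse. A toy model of a packed doorbell value; the field layout below is invented for illustration and is not the chip's actual format:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented field layout: a type/ring key in the high bits, the current
     * producer index in the low bits. The real encoding lives in bnxt.h. */
    #define DB_TYPE_SHIFT 60
    #define DB_IDX_MASK   0xffffffu

    static uint64_t db_value(uint64_t key, uint32_t prod)
    {
        return (key << DB_TYPE_SHIFT) | (prod & DB_IDX_MASK);
    }

    int main(void)
    {
        /* one precomputed key per ring, then one OR per doorbell ring */
        printf("db=%#llx\n", (unsigned long long)db_value(0x4, 123));
        return 0;
    }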
5549 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5550 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5553 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
8329 struct bnxt_tx_ring_info *txr;
8333 txr = &bp->tx_ring[i];
8334 txr->dev_state = BNXT_DEV_STATE_CLOSING;
8345 struct bnxt_tx_ring_info *txr;
8348 txr = &bp->tx_ring[i];
8349 txr->dev_state = 0;
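Lines 8329-8349 gate the data path: bnxt_tx_disable() marks every ring BNXT_DEV_STATE_CLOSING and bnxt_tx_enable() clears it again. The flag matters because the completion path would otherwise rewake a stopped queue; the check at line 679 and this state interact as sketched below (flag value illustrative):

    #include <stdio.h>

    #define BNXT_DEV_STATE_CLOSING 1   /* flag value illustrative */

    struct txr_model { int dev_state; };   /* invented stand-in */

    /* Mirrors the wake condition around lines 678-679: room in the ring is
     * not enough; a closing ring must stay stopped. */
    static int may_wake(const struct txr_model *txr, unsigned int avail,
                        unsigned int wake_thresh)
    {
        return avail > wake_thresh &&
               txr->dev_state != BNXT_DEV_STATE_CLOSING;
    }

    int main(void)
    {
        struct txr_model txr = { 0 };

        printf("open:    wake=%d\n", may_wake(&txr, 100, 32));
        txr.dev_state = BNXT_DEV_STATE_CLOSING;   /* bnxt_tx_disable(), line 8334 */
        printf("closing: wake=%d\n", may_wake(&txr, 100, 32));
        return 0;
    }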
9874 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9877 if (!txr)
9881 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9882 txr->tx_cons);
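Lines 9874-9882 are the debug dump, printing fw_ring_id, tx_prod, and tx_cons for each ring. The in-flight descriptor count can be recovered from a dump with the same masked subtraction the driver uses; the ring mask is assumed to be 2^n - 1:

    #include <stdio.h>

    int main(void)
    {
        unsigned short prod = 17, cons = 65533;   /* sample dump values */
        unsigned int mask = 511;                  /* assumed 2^n - 1 */

        printf("in flight: %u\n",
               (unsigned)(unsigned short)(prod - cons) & mask);
        return 0;
    }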