/Linux-v4.19/drivers/net/ethernet/huawei/hinic/ |
D | hinic_tx.c |
   56  void hinic_txq_clean_stats(struct hinic_txq *txq)  in hinic_txq_clean_stats() argument
   58  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in hinic_txq_clean_stats()
   74  void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)  in hinic_txq_get_stats() argument
   76  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in hinic_txq_get_stats()
   95  static void txq_stats_init(struct hinic_txq *txq)  in txq_stats_init() argument
   97  struct hinic_txq_stats *txq_stats = &txq->txq_stats;  in txq_stats_init()
  100  hinic_txq_clean_stats(txq);  in txq_stats_init()
  185  struct hinic_txq *txq;  in hinic_xmit_frame() local
  189  txq = &nic_dev->txqs[skb->queue_mapping];  in hinic_xmit_frame()
  190  qp = container_of(txq->sq, struct hinic_qp, sq);  in hinic_xmit_frame()
  [all …]
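The matches above point at a per-TX-queue statistics block with "clean" (reset) and "get" (snapshot) helpers. A minimal sketch of that pattern follows, built on the kernel's u64_stats_sync API; the struct layout, field names, and use of u64_stats_sync here are assumptions for illustration, not HiNIC's actual definitions.

    #include <linux/u64_stats_sync.h>

    /* Illustrative per-TXQ counters; field names are invented. */
    struct example_txq_stats {
    	u64 pkts;
    	u64 bytes;
    	struct u64_stats_sync syncp;
    };

    /* Reset the counters, as a *_clean_stats() helper might do. */
    static void example_txq_clean_stats(struct example_txq_stats *txq_stats)
    {
    	u64_stats_update_begin(&txq_stats->syncp);
    	txq_stats->pkts = 0;
    	txq_stats->bytes = 0;
    	u64_stats_update_end(&txq_stats->syncp);
    }

    /* Copy out a consistent snapshot, as a *_get_stats() helper might do. */
    static void example_txq_get_stats(struct example_txq_stats *txq_stats,
    				  struct example_txq_stats *stats)
    {
    	unsigned int start;

    	do {
    		start = u64_stats_fetch_begin(&txq_stats->syncp);
    		stats->pkts = txq_stats->pkts;
    		stats->bytes = txq_stats->bytes;
    	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
    }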
|
D | hinic_tx.h |
   51  void hinic_txq_clean_stats(struct hinic_txq *txq);
   53  void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
   57  int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
   60  void hinic_clean_txq(struct hinic_txq *txq);
|
/Linux-v4.19/drivers/net/wireless/intel/iwlwifi/pcie/ |
D | tx.c |
  154  struct iwl_txq *txq = from_timer(txq, t, stuck_timer);  in iwl_pcie_txq_stuck_timer() local
  155  struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;  in iwl_pcie_txq_stuck_timer()
  158  spin_lock(&txq->lock);  in iwl_pcie_txq_stuck_timer()
  160  if (txq->read_ptr == txq->write_ptr) {  in iwl_pcie_txq_stuck_timer()
  161  spin_unlock(&txq->lock);  in iwl_pcie_txq_stuck_timer()
  164  spin_unlock(&txq->lock);  in iwl_pcie_txq_stuck_timer()
  166  iwl_trans_pcie_log_scd_error(trans, txq);  in iwl_pcie_txq_stuck_timer()
  175  struct iwl_txq *txq, u16 byte_cnt,  in iwl_pcie_txq_update_byte_cnt_tbl() argument
  180  int write_ptr = txq->write_ptr;  in iwl_pcie_txq_update_byte_cnt_tbl()
  181  int txq_id = txq->id;  in iwl_pcie_txq_update_byte_cnt_tbl()
  [all …]
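iwl_pcie_txq_stuck_timer() shows the timer_setup()/from_timer() idiom: the timer_list is embedded in the queue structure and the callback recovers the containing object, then checks read/write pointers under the queue lock. A hedged sketch of that idiom with an invented struct; only the timer API usage mirrors the snippet, the recovery action is a placeholder.

    #include <linux/timer.h>
    #include <linux/spinlock.h>
    #include <linux/printk.h>

    /* Illustrative queue with an embedded watchdog timer; fields are assumptions. */
    struct example_txq {
    	spinlock_t lock;
    	int read_ptr;
    	int write_ptr;
    	struct timer_list stuck_timer;
    };

    static void example_txq_stuck_timer(struct timer_list *t)
    {
    	/* Recover the queue that embeds this timer_list. */
    	struct example_txq *txq = from_timer(txq, t, stuck_timer);

    	spin_lock(&txq->lock);
    	if (txq->read_ptr == txq->write_ptr) {
    		/* Queue drained while the timer was pending: nothing is stuck. */
    		spin_unlock(&txq->lock);
    		return;
    	}
    	spin_unlock(&txq->lock);

    	pr_warn("example txq appears stuck (rd=%d wr=%d)\n",
    		txq->read_ptr, txq->write_ptr);
    }

    static void example_txq_init_timer(struct example_txq *txq)
    {
    	spin_lock_init(&txq->lock);
    	timer_setup(&txq->stuck_timer, example_txq_stuck_timer, 0);
    }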
|
D | tx-gen2.c |
   80  for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {  in iwl_pcie_gen2_tx_stop()
   81  if (!trans_pcie->txq[txq_id])  in iwl_pcie_gen2_tx_stop()
   91  struct iwl_txq *txq, u16 byte_cnt,  in iwl_pcie_gen2_update_byte_tbl() argument
   94  struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;  in iwl_pcie_gen2_update_byte_tbl()
   96  struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;  in iwl_pcie_gen2_update_byte_tbl()
   97  int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);  in iwl_pcie_gen2_update_byte_tbl()
  105  if (WARN_ON(len > 0xFFF || idx >= txq->n_window))  in iwl_pcie_gen2_update_byte_tbl()
  131  struct iwl_txq *txq)  in iwl_pcie_gen2_txq_inc_wr_ptr() argument
  133  lockdep_assert_held(&txq->lock);  in iwl_pcie_gen2_txq_inc_wr_ptr()
  135  IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);  in iwl_pcie_gen2_txq_inc_wr_ptr()
  [all …]
|
/Linux-v4.19/drivers/net/wireless/ath/ath9k/ |
D | xmit.c |
   50  static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
   53  int tx_flags, struct ath_txq *txq,
   56  struct ath_txq *txq, struct list_head *bf_q,
   59  static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
   67  struct ath_txq *txq,
  101  void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)  in ath_txq_unlock_complete() argument
  102  __releases(&txq->axq_lock)  in ath_txq_unlock_complete()
  109  skb_queue_splice_init(&txq->complete_q, &q);  in ath_txq_unlock_complete()
  110  spin_unlock_bh(&txq->axq_lock);  in ath_txq_unlock_complete()
  159  struct ath_txq *txq = tid->txq;  in ath9k_wake_tx_queue() local
  [all …]
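ath_txq_unlock_complete() illustrates a common completion pattern: splice the queue's completion list onto a private list while holding the lock, drop the lock, then process each frame. A rough sketch of that pattern; the struct and the completion handler below are placeholders, and the function is assumed to be entered with the lock already held, like the __releases() annotation in the match suggests.

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    /* Illustrative queue: a lock plus a list of completed frames. */
    struct example_txq {
    	spinlock_t axq_lock;
    	struct sk_buff_head complete_q;
    };

    static void example_complete_skb(struct sk_buff *skb)
    {
    	dev_kfree_skb_any(skb);		/* placeholder for real completion work */
    }

    /* Caller holds axq_lock.  Drain complete_q outside the lock so the
     * completion work cannot deadlock against paths that take axq_lock. */
    static void example_txq_unlock_complete(struct example_txq *txq)
    {
    	struct sk_buff_head q;
    	struct sk_buff *skb;

    	__skb_queue_head_init(&q);
    	skb_queue_splice_init(&txq->complete_q, &q);
    	spin_unlock_bh(&txq->axq_lock);

    	while ((skb = __skb_dequeue(&q)) != NULL)
    		example_complete_skb(skb);
    }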
|
/Linux-v4.19/drivers/net/ethernet/marvell/ |
D | mv643xx_eth.c |
  190  #define IS_TSO_HEADER(txq, addr) \  argument
  191  ((addr >= txq->tso_hdrs_dma) && \
  192  (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
  419  struct tx_queue txq[8];  member
  457  static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)  in txq_to_mp() argument
  459  return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);  in txq_to_mp()
  478  static void txq_reset_hw_ptr(struct tx_queue *txq)  in txq_reset_hw_ptr() argument
  480  struct mv643xx_eth_private *mp = txq_to_mp(txq);  in txq_reset_hw_ptr()
  483  addr = (u32)txq->tx_desc_dma;  in txq_reset_hw_ptr()
  484  addr += txq->tx_curr_desc * sizeof(struct tx_desc);  in txq_reset_hw_ptr()
  [all …]
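Two idioms are visible in these matches: a range-check macro that tests whether a DMA address falls inside the queue's preallocated TSO-header block (such addresses were not mapped per-fragment and must not be unmapped per-fragment), and container_of() to get from a tx_queue embedded in an array back to the owning private struct. A small illustrative sketch with invented names and sizes:

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/types.h>

    #define EX_TSO_HEADER_SIZE	128	/* assumed per-packet header slot size */

    /* True if @addr points into the queue's contiguous TSO header block. */
    #define EX_IS_TSO_HEADER(txq, addr)				\
    	((addr) >= (txq)->tso_hdrs_dma &&			\
    	 (addr) <  (txq)->tso_hdrs_dma +			\
    		   (txq)->tx_ring_size * EX_TSO_HEADER_SIZE)

    struct ex_tx_queue {
    	int index;
    	int tx_ring_size;
    	dma_addr_t tso_hdrs_dma;
    };

    struct ex_eth_private {
    	struct ex_tx_queue txq[8];
    };

    /* Recover the owning private struct from a pointer to one array slot;
     * txq->index selects which slot this queue occupies. */
    static struct ex_eth_private *ex_txq_to_priv(struct ex_tx_queue *txq)
    {
    	return container_of(txq, struct ex_eth_private, txq[txq->index]);
    }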
|
D | mvneta.c |
  125  #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)  argument
  322  #define IS_TSO_HEADER(txq, addr) \  argument
  323  ((addr >= txq->tso_hdrs_phys) && \
  324  (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
  663  static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_get() argument
  665  txq->txq_get_index++;  in mvneta_txq_inc_get()
  666  if (txq->txq_get_index == txq->size)  in mvneta_txq_inc_get()
  667  txq->txq_get_index = 0;  in mvneta_txq_inc_get()
  671  static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)  in mvneta_txq_inc_put() argument
  673  txq->txq_put_index++;  in mvneta_txq_inc_put()
  [all …]
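mvneta_txq_inc_get()/mvneta_txq_inc_put() are the classic wrap-around increment for software ring indices. The same arithmetic as a trivial standalone helper (the caller and field names are assumed):

    /* Advance a ring index by one, wrapping at the ring size. */
    static inline unsigned int ex_ring_index_inc(unsigned int index,
    					     unsigned int size)
    {
    	index++;
    	if (index == size)
    		index = 0;
    	return index;
    }

    /* Usage, mirroring the get/put pair in the matches above:
     *   txq_get_index = ex_ring_index_inc(txq_get_index, txq_size);
     *   txq_put_index = ex_ring_index_inc(txq_put_index, txq_size);
     */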
|
/Linux-v4.19/drivers/net/ethernet/qlogic/qede/ |
D | qede_fp.c |
  100  int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)  in qede_free_tx_pkt() argument
  102  u16 idx = txq->sw_tx_cons;  in qede_free_tx_pkt()
  103  struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;  in qede_free_tx_pkt()
  108  bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;  in qede_free_tx_pkt()
  114  idx, txq->sw_tx_cons, txq->sw_tx_prod);  in qede_free_tx_pkt()
  120  first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
  128  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
  138  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
  144  qed_chain_consume(&txq->tx_pbl);  in qede_free_tx_pkt()
  148  txq->sw_tx_ring.skbs[idx].skb = NULL;  in qede_free_tx_pkt()
  [all …]
|
D | qede_main.c |
   876  kfree(fp->txq);  in qede_free_fp_array()
   926  fp->txq = kcalloc(edev->dev_info.num_tc,  in qede_alloc_fp_array()
   927  sizeof(*fp->txq), GFP_KERNEL);  in qede_alloc_fp_array()
   928  if (!fp->txq)  in qede_alloc_fp_array()
  1404  static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)  in qede_free_mem_txq() argument
  1407  if (txq->is_xdp)  in qede_free_mem_txq()
  1408  kfree(txq->sw_tx_ring.xdp);  in qede_free_mem_txq()
  1410  kfree(txq->sw_tx_ring.skbs);  in qede_free_mem_txq()
  1413  edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);  in qede_free_mem_txq()
  1417  static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)  in qede_alloc_mem_txq() argument
  [all …]
|
/Linux-v4.19/drivers/net/ethernet/atheros/alx/ |
D | main.c |
   54  static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)  in alx_free_txbuf() argument
   56  struct alx_buffer *txb = &txq->bufs[entry];  in alx_free_txbuf()
   59  dma_unmap_single(txq->dev,  in alx_free_txbuf()
  150  return alx->qnapi[r_idx]->txq;  in alx_tx_queue_mapping()
  153  static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)  in alx_get_tx_queue() argument
  155  return netdev_get_tx_queue(txq->netdev, txq->queue_idx);  in alx_get_tx_queue()
  158  static inline int alx_tpd_avail(struct alx_tx_queue *txq)  in alx_tpd_avail() argument
  160  if (txq->write_idx >= txq->read_idx)  in alx_tpd_avail()
  161  return txq->count + txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
  162  return txq->read_idx - txq->write_idx - 1;  in alx_tpd_avail()
  [all …]
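alx_tpd_avail() computes the free descriptors in a producer/consumer ring while keeping one slot unused, so that write_idx == read_idx can only mean "empty" rather than "full". The same arithmetic as a standalone helper (struct and field names assumed):

    struct ex_tx_ring {
    	unsigned int count;		/* total descriptors in the ring */
    	unsigned int read_idx;		/* next descriptor hardware will consume */
    	unsigned int write_idx;		/* next descriptor software will fill */
    };

    /* Free descriptors; one slot is sacrificed to distinguish full from empty. */
    static inline int ex_tx_ring_avail(const struct ex_tx_ring *r)
    {
    	if (r->write_idx >= r->read_idx)
    		return r->count + r->read_idx - r->write_idx - 1;
    	return r->read_idx - r->write_idx - 1;
    }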
|
/Linux-v4.19/drivers/net/wireless/mediatek/mt76/ |
D | tx.c |
   86  mt76_txq_get_qid(struct ieee80211_txq *txq)  in mt76_txq_get_qid() argument
   88  if (!txq->sta)  in mt76_txq_get_qid()
   91  return txq->ac;  in mt76_txq_get_qid()
  126  struct ieee80211_txq *txq = mtxq_to_txq(mtxq);  in mt76_txq_dequeue() local
  134  ieee80211_sta_set_buffered(txq->sta, tid, false);  in mt76_txq_dequeue()
  139  skb = ieee80211_tx_dequeue(dev->hw, txq);  in mt76_txq_dequeue()
  186  struct ieee80211_txq *txq = sta->txq[i];  in mt76_release_buffered_frames() local
  187  struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;  in mt76_release_buffered_frames()
  221  struct ieee80211_txq *txq = mtxq_to_txq(mtxq);  in mt76_txq_send_burst() local
  239  ieee80211_get_tx_rates(txq->vif, txq->sta, skb,  in mt76_txq_send_burst()
  [all …]
|
D | mt76x2_common.c |
  20  void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)  in mt76x2_txq_init() argument
  24  if (!txq)  in mt76x2_txq_init()
  27  mtxq = (struct mt76_txq *) txq->drv_priv;  in mt76x2_txq_init()
  28  if (txq->sta) {  in mt76x2_txq_init()
  31  sta = (struct mt76x2_sta *) txq->sta->drv_priv;  in mt76x2_txq_init()
  36  mvif = (struct mt76x2_vif *) txq->vif->drv_priv;  in mt76x2_txq_init()
  40  mt76_txq_init(&dev->mt76, txq);  in mt76x2_txq_init()
  51  struct ieee80211_txq *txq = sta->txq[params->tid];  in mt76x2_ampdu_action() local
  56  if (!txq)  in mt76x2_ampdu_action()
  59  mtxq = (struct mt76_txq *)txq->drv_priv;  in mt76x2_ampdu_action()
  [all …]
|
/Linux-v4.19/include/trace/events/ |
D | qdisc.h |
  14  TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
  17  TP_ARGS(qdisc, txq, packets, skb),
  21  __field(const struct netdev_queue *, txq )
  33  __entry->txq = txq;
  36  __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
  39  __entry->txq_state = txq->state;
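These matches come from a TRACE_EVENT() definition that records the txq pointer and a few fields derived from it (ifindex, queue state) at dequeue time. A minimal, hypothetical trace event in the same style; the event name, system name, and field selection below are invented, and only the overall TRACE_EVENT/TP_* structure mirrors the kernel convention.

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM example

    #if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_EXAMPLE_H

    #include <linux/netdevice.h>
    #include <linux/tracepoint.h>

    TRACE_EVENT(example_txq_dequeue,

    	TP_PROTO(const struct netdev_queue *txq, int packets),

    	TP_ARGS(txq, packets),

    	TP_STRUCT__entry(
    		__field(const struct netdev_queue *,	txq	)
    		__field(int,				ifindex	)
    		__field(unsigned long,			txq_state)
    		__field(int,				packets	)
    	),

    	TP_fast_assign(
    		__entry->txq	   = txq;
    		__entry->ifindex   = txq->dev ? txq->dev->ifindex : 0;
    		__entry->txq_state = txq->state;
    		__entry->packets   = packets;
    	),

    	TP_printk("txq=%p ifindex=%d state=%#lx packets=%d",
    		  __entry->txq, __entry->ifindex,
    		  __entry->txq_state, __entry->packets)
    );

    #endif /* _TRACE_EXAMPLE_H */

    /* This part must be outside the include guard. */
    #include <trace/define_trace.h>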
|
/Linux-v4.19/drivers/net/ethernet/freescale/ |
D | fec_main.c |
  238  #define IS_TSO_HEADER(txq, addr) \  argument
  239  ((addr >= txq->tso_hdrs_dma) && \
  240  (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
  264  static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)  in fec_enet_get_free_txdesc_num() argument
  268  entries = (((const char *)txq->dirty_tx -  in fec_enet_get_free_txdesc_num()
  269  (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;  in fec_enet_get_free_txdesc_num()
  271  return entries >= 0 ? entries : entries + txq->bd.ring_size;  in fec_enet_get_free_txdesc_num()
  297  struct fec_enet_priv_tx_q *txq;  in fec_dump() local
  303  txq = fep->tx_queue[0];  in fec_dump()
  304  bdp = txq->bd.base;  in fec_dump()
  [all …]
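fec_enet_get_free_txdesc_num() derives the number of free descriptors from the byte distance between the "dirty" (last cleaned) and "current" (next to use) descriptor pointers, shifting by the log2 of the descriptor size and wrapping by the ring size. An equivalent standalone helper; the parameter names are assumptions, the arithmetic mirrors the matched lines.

    /* Free descriptors between @cur (next to use) and @dirty (last cleaned),
     * for descriptors of size (1 << dsize_log2) in a ring of @ring_size
     * entries.  One entry is kept free, hence the "- 1". */
    static int ex_free_txdesc_num(const void *dirty, const void *cur,
    			      unsigned int dsize_log2, int ring_size)
    {
    	int entries;

    	entries = (int)(((const char *)dirty - (const char *)cur)
    			>> dsize_log2) - 1;

    	return entries >= 0 ? entries : entries + ring_size;
    }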
|
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | sge.c |
  1133  static void txq_stop(struct sge_eth_txq *txq)  in txq_stop() argument
  1135  netif_tx_stop_queue(txq->txq);  in txq_stop()
  1136  txq->q.stops++;  in txq_stop()
  1164  struct sge_eth_txq *txq;  in t4vf_eth_xmit() local
  1198  txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  in t4vf_eth_xmit()
  1208  reclaim_completed_tx(adapter, &txq->q, true);  in t4vf_eth_xmit()
  1217  credits = txq_avail(&txq->q) - ndesc;  in t4vf_eth_xmit()
  1226  txq_stop(txq);  in t4vf_eth_xmit()
  1240  txq->mapping_err++;  in t4vf_eth_xmit()
  1255  txq_stop(txq);  in t4vf_eth_xmit()
  [all …]
|
/Linux-v4.19/drivers/net/ethernet/hisilicon/ |
D | hisi_femac.c |
  132  struct hisi_femac_queue txq;  member
  159  dma_addr = priv->txq.dma_phys[pos];  in hisi_femac_tx_dma_unmap()
  167  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_xmit_reclaim() local
  175  skb = txq->skb[txq->tail];  in hisi_femac_xmit_reclaim()
  181  hisi_femac_tx_dma_unmap(priv, skb, txq->tail);  in hisi_femac_xmit_reclaim()
  189  txq->skb[txq->tail] = NULL;  in hisi_femac_xmit_reclaim()
  190  txq->tail = (txq->tail + 1) % txq->num;  in hisi_femac_xmit_reclaim()
  384  ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);  in hisi_femac_init_tx_and_rx_queues()
  399  struct hisi_femac_queue *txq = &priv->txq;  in hisi_femac_free_skb_rings() local
  424  pos = txq->tail;  in hisi_femac_free_skb_rings()
  [all …]
|
/Linux-v4.19/drivers/net/wireless/ath/ath5k/ |
D | base.c |
  733  struct ath5k_txq *txq, int padsize,  in ath5k_txbuf_setup() argument
  830  spin_lock_bh(&txq->lock);  in ath5k_txbuf_setup()
  831  list_add_tail(&bf->list, &txq->q);  in ath5k_txbuf_setup()
  832  txq->txq_len++;  in ath5k_txbuf_setup()
  833  if (txq->link == NULL) /* is this first packet? */  in ath5k_txbuf_setup()
  834  ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);  in ath5k_txbuf_setup()
  836  *txq->link = bf->daddr;  in ath5k_txbuf_setup()
  838  txq->link = &ds->ds_link;  in ath5k_txbuf_setup()
  839  ath5k_hw_start_tx_dma(ah, txq->qnum);  in ath5k_txbuf_setup()
  841  spin_unlock_bh(&txq->lock);  in ath5k_txbuf_setup()
  [all …]
|
/Linux-v4.19/drivers/net/ethernet/marvell/mvpp2/ |
D | mvpp2_main.c |
   201  unsigned int txq)  in mvpp2_txdesc_txq_set() argument
   204  tx_desc->pp21.phys_txq = txq;  in mvpp2_txdesc_txq_set()
   206  tx_desc->pp22.phys_txq = txq;  in mvpp2_txdesc_txq_set()
   296  static inline int mvpp2_txq_phys(int port, int txq)  in mvpp2_txq_phys() argument
   298  return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;  in mvpp2_txq_phys()
  1505  struct mvpp2_tx_queue *txq = port->txqs[queue];  in mvpp2_egress_enable() local
  1507  if (txq->descs)  in mvpp2_egress_enable()
  1611  mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)  in mvpp2_txq_next_desc_get() argument
  1613  int tx_desc = txq->next_desc_to_proc;  in mvpp2_txq_next_desc_get()
  1615  txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);  in mvpp2_txq_next_desc_get()
  [all …]
|
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb3/ |
D | sge.c |
  179  return container_of(q, struct sge_qset, txq[qidx]);  in txq_to_qset()
  658  memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);  in t3_reset_qset()
  695  if (q->txq[i].desc) {  in t3_free_qset()
  697  t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);  in t3_free_qset()
  699  if (q->txq[i].sdesc) {  in t3_free_qset()
  700  free_tx_desc(adapter, &q->txq[i],  in t3_free_qset()
  701  q->txq[i].in_use);  in t3_free_qset()
  702  kfree(q->txq[i].sdesc);  in t3_free_qset()
  705  q->txq[i].size *  in t3_free_qset()
  707  q->txq[i].desc, q->txq[i].phys_addr);  in t3_free_qset()
  [all …]
|
/Linux-v4.19/net/core/ |
D | netpoll.c |
   73  struct netdev_queue *txq)  in netpoll_start_xmit() argument
   92  status = netdev_start_xmit(skb, dev, txq, false);  in netpoll_start_xmit()
  105  while ((skb = skb_dequeue(&npinfo->txq))) {  in queue_process()
  107  struct netdev_queue *txq;  in queue_process() local
  122  txq = netdev_get_tx_queue(dev, q_index);  in queue_process()
  123  HARD_TX_LOCK(dev, txq, smp_processor_id());  in queue_process()
  124  if (netif_xmit_frozen_or_stopped(txq) ||  in queue_process()
  125  netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {  in queue_process()
  126  skb_queue_head(&npinfo->txq, skb);  in queue_process()
  127  HARD_TX_UNLOCK(dev, txq);  in queue_process()
  [all …]
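queue_process() shows the deferred-transmit pattern netpoll uses: pop an skb from a private queue, take the device TX queue lock, and if the queue is frozen/stopped or the driver refuses the frame, put the skb back at the head and try again later. A condensed, hedged sketch of that loop; the helper name is invented, queue selection is hard-coded to queue 0, and the IRQ/scheduling handling of the real code is omitted.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Try to flush @deferred out through queue 0 of @dev; requeue on any
     * failure.  The caller is expected to reschedule this when it returns
     * early (e.g. from delayed work), as netpoll does. */
    static void ex_flush_deferred_tx(struct net_device *dev,
    				 struct sk_buff_head *deferred)
    {
    	struct sk_buff *skb;

    	while ((skb = skb_dequeue(deferred))) {
    		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

    		HARD_TX_LOCK(dev, txq, smp_processor_id());
    		if (netif_xmit_frozen_or_stopped(txq) ||
    		    netdev_start_xmit(skb, dev, txq, false) != NETDEV_TX_OK) {
    			/* Device busy: push the skb back and retry later. */
    			skb_queue_head(deferred, skb);
    			HARD_TX_UNLOCK(dev, txq);
    			return;
    		}
    		HARD_TX_UNLOCK(dev, txq);
    	}
    }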
|
/Linux-v4.19/drivers/atm/ |
D | ambassador.c |
  628  amb_txq * txq = &dev->txq;  in tx_give() local
  636  spin_lock_irqsave (&txq->lock, flags);  in tx_give()
  638  if (txq->pending < txq->maximum) {  in tx_give()
  639  PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);  in tx_give()
  641  *txq->in.ptr = *tx;  in tx_give()
  642  txq->pending++;  in tx_give()
  643  txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);  in tx_give()
  645  wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));  in tx_give()
  648  if (txq->pending > txq->high)  in tx_give()
  649  txq->high = txq->pending;  in tx_give()
  [all …]
|
/Linux-v4.19/drivers/net/wireless/intel/iwlegacy/ |
D | common.c |
   382  il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;  in il_send_cmd_sync()
  2726  il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)  in il_txq_update_write_ptr() argument
  2729  int txq_id = txq->q.id;  in il_txq_update_write_ptr()
  2731  if (txq->need_update == 0)  in il_txq_update_write_ptr()
  2749  il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));  in il_txq_update_write_ptr()
  2757  _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));  in il_txq_update_write_ptr()
  2758  txq->need_update = 0;  in il_txq_update_write_ptr()
  2768  struct il_tx_queue *txq = &il->txq[txq_id];  in il_tx_queue_unmap() local
  2769  struct il_queue *q = &txq->q;  in il_tx_queue_unmap()
  2775  il->ops->txq_free_tfd(il, txq);  in il_tx_queue_unmap()
  [all …]
|
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4/ |
D | sge.c |
  1147  netif_tx_stop_queue(q->txq);  in eth_txq_stop()
  1640  struct sge_eth_txq *txq;  in cxgb4_vf_eth_xmit() local
  1669  txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  in cxgb4_vf_eth_xmit()
  1674  cxgb4_reclaim_completed_tx(adapter, &txq->q, true);  in cxgb4_vf_eth_xmit()
  1682  credits = txq_avail(&txq->q) - ndesc;  in cxgb4_vf_eth_xmit()
  1690  eth_txq_stop(txq);  in cxgb4_vf_eth_xmit()
  1703  txq->mapping_err++;  in cxgb4_vf_eth_xmit()
  1717  eth_txq_stop(txq);  in cxgb4_vf_eth_xmit()
  1727  wr = (void *)&txq->q.desc[txq->q.pidx];  in cxgb4_vf_eth_xmit()
  1779  txq->tso++;  in cxgb4_vf_eth_xmit()
  [all …]
|
/Linux-v4.19/net/sched/ |
D | sch_generic.c |
   54  const struct netdev_queue *txq = q->dev_queue;  in __skb_dequeue_bad_txq() local
   66  txq = skb_get_tx_queue(txq->dev, skb);  in __skb_dequeue_bad_txq()
   67  if (!netif_xmit_frozen_or_stopped(txq)) {  in __skb_dequeue_bad_txq()
  171  const struct netdev_queue *txq,  in try_bulk_dequeue_skb() argument
  174  int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;  in try_bulk_dequeue_skb()
  222  const struct netdev_queue *txq = q->dev_queue;  in dequeue_skb() local
  250  txq = skb_get_tx_queue(txq->dev, skb);  in dequeue_skb()
  251  if (!netif_xmit_frozen_or_stopped(txq)) {  in dequeue_skb()
  271  netif_xmit_frozen_or_stopped(txq))  in dequeue_skb()
  281  try_bulk_dequeue_skb(q, skb, txq, packets);  in dequeue_skb()
  [all …]
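try_bulk_dequeue_skb() caps a bulk dequeue by the byte budget that qdisc_avail_bulklimit() reports for the TX queue (the BQL limit). A simplified sketch of the byte-budget loop over a plain sk_buff_head, with invented names; the real code dequeues from the qdisc and builds an skb chain rather than using a second list, so this only illustrates the budgeting idea.

    #include <linux/skbuff.h>

    /* Move packets from @src to @dst until @bytelimit is spent.
     * Returns the number of packets moved. */
    static int ex_bulk_dequeue(struct sk_buff_head *src, struct sk_buff_head *dst,
    			   int bytelimit)
    {
    	struct sk_buff *skb;
    	int packets = 0;

    	while (bytelimit > 0 && (skb = skb_dequeue(src)) != NULL) {
    		bytelimit -= skb->len;
    		skb_queue_tail(dst, skb);
    		packets++;
    	}

    	return packets;
    }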
|
/Linux-v4.19/drivers/net/ethernet/brocade/bna/ |
D | bna_tx_rx.c |
  2878  struct bna_txq *txq;  in bna_tx_sm_started_entry() local
  2881  list_for_each_entry(txq, &tx->txq_q, qe) {  in bna_tx_sm_started_entry()
  2882  txq->tcb->priority = txq->priority;  in bna_tx_sm_started_entry()
  2884  bna_ib_start(tx->bna, &txq->ib, is_regular);  in bna_tx_sm_started_entry()
  3097  struct bna_txq *txq = NULL;  in bna_bfi_tx_enet_start() local
  3107  txq = txq ? list_next_entry(txq, qe)  in bna_bfi_tx_enet_start()
  3109  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);  in bna_bfi_tx_enet_start()
  3110  cfg_req->q_cfg[i].q.priority = txq->priority;  in bna_bfi_tx_enet_start()
  3113  txq->ib.ib_seg_host_addr.lsb;  in bna_bfi_tx_enet_start()
  3115  txq->ib.ib_seg_host_addr.msb;  in bna_bfi_tx_enet_start()
  [all …]
|