Lines matching refs: txq (identifier references; all hits below are in net/sched/sch_generic.c)

54 	const struct netdev_queue *txq = q->dev_queue;  in __skb_dequeue_bad_txq()  local
66 txq = skb_get_tx_queue(txq->dev, skb); in __skb_dequeue_bad_txq()
67 if (!netif_xmit_frozen_or_stopped(txq)) { in __skb_dequeue_bad_txq()
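The three hits above (lines 54, 66, 67) are in __skb_dequeue_bad_txq(), which drains skbs that were parked because their tx queue was frozen or stopped at transmit time. A minimal sketch of the pattern, assuming a ~v4.19/v5.x net/sched/sch_generic.c (surrounding list handling elided):

    const struct netdev_queue *txq = q->dev_queue;  /* qdisc's default queue */

    /* Re-resolve the queue per skb: on a multiqueue device this skb
     * may map to a different subqueue than the qdisc's own dev_queue.
     */
    txq = skb_get_tx_queue(txq->dev, skb);
    if (!netif_xmit_frozen_or_stopped(txq)) {
            /* queue is live again: hand the skb back to the dequeue path */
    }
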
171 const struct netdev_queue *txq, in try_bulk_dequeue_skb() argument
174 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb()
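Lines 171 and 174 are try_bulk_dequeue_skb(), the bulk-dequeue helper: qdisc_avail_bulklimit() turns the txq's BQL (byte queue limit) headroom into a byte budget, the first skb's length is charged up front, and further skbs are chained until the budget runs out. A hedged sketch of the loop:

    int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

    while (bytelimit > 0) {
            struct sk_buff *nskb = q->dequeue(q);

            if (!nskb)
                    break;
            bytelimit -= nskb->len;  /* charge each extra skb to the budget */
            skb->next = nskb;        /* chain it onto the bulk list */
            skb = nskb;
            (*packets)++;            /* caller's per-run packet counter */
    }
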
222 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb() local
250 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
251 if (!netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
271 netif_xmit_frozen_or_stopped(txq)) in dequeue_skb()
281 try_bulk_dequeue_skb(q, skb, txq, packets); in dequeue_skb()
286 trace_qdisc_dequeue(q, txq, *packets, skb); in dequeue_skb()
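Lines 222-286 are dequeue_skb(), the main dequeue path. It starts from the qdisc's own dev_queue, re-resolves the real txq when handing back a requeued gso_skb, bails out early when a single-queue qdisc sits on a frozen/stopped queue, bulk-dequeues on top of the first skb where allowed, and finally fires the qdisc_dequeue tracepoint. A condensed, hedged sketch of that flow:

    const struct netdev_queue *txq = q->dev_queue;

    /* requeued gso_skb: re-resolve its real subqueue before retrying */
    txq = skb_get_tx_queue(txq->dev, skb);
    if (!netif_xmit_frozen_or_stopped(txq)) {
            /* ok to hand the requeued skb back out */
    }

    /* single-queue qdisc on a frozen/stopped queue: nothing to do yet */
    if ((q->flags & TCQ_F_ONETXQUEUE) &&
        netif_xmit_frozen_or_stopped(txq))
            return skb;

    skb = q->dequeue(q);
    if (skb && qdisc_may_bulk(q))
            try_bulk_dequeue_skb(q, skb, txq, packets);

    trace_qdisc_dequeue(q, txq, *packets, skb);
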
300 struct net_device *dev, struct netdev_queue *txq, in sch_direct_xmit() argument
325 HARD_TX_LOCK(dev, txq, smp_processor_id()); in sch_direct_xmit()
326 if (!netif_xmit_frozen_or_stopped(txq)) in sch_direct_xmit()
327 skb = dev_hard_start_xmit(skb, dev, txq, &ret); in sch_direct_xmit()
329 HARD_TX_UNLOCK(dev, txq); in sch_direct_xmit()
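Lines 300-329 are sch_direct_xmit(), which owns the classic transmit pattern: take the driver's tx lock, call the driver only if the queue is neither frozen nor stopped, then unlock. The core is exactly what the hits show:

    HARD_TX_LOCK(dev, txq, smp_processor_id());
    if (!netif_xmit_frozen_or_stopped(txq))
            skb = dev_hard_start_xmit(skb, dev, txq, &ret);
    HARD_TX_UNLOCK(dev, txq);

If the queue was stopped, or the driver returns NETDEV_TX_BUSY, the skb is requeued onto the qdisc rather than dropped.
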
374 struct netdev_queue *txq; in qdisc_restart() local
388 txq = skb_get_tx_queue(dev, skb); in qdisc_restart()
390 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); in qdisc_restart()
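Lines 374-390 are qdisc_restart(), which glues the halves together: dequeue_skb() pulls (and possibly bulks) skbs, skb_get_tx_queue() maps the head skb to its txq, and sch_direct_xmit() pushes it to the driver. A condensed sketch, assuming the same kernel era:

    static inline bool qdisc_restart(struct Qdisc *q, int *packets)
    {
            spinlock_t *root_lock = NULL;
            struct netdev_queue *txq;
            struct net_device *dev;
            struct sk_buff *skb;
            bool validate;

            skb = dequeue_skb(q, &validate, packets);
            if (unlikely(!skb))
                    return false;

            if (!(q->flags & TCQ_F_NOLOCK))
                    root_lock = qdisc_lock(q);

            dev = qdisc_dev(q);
            txq = skb_get_tx_queue(dev, skb);

            return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
    }
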
446 struct netdev_queue *txq; in dev_watchdog() local
448 txq = netdev_get_tx_queue(dev, i); in dev_watchdog()
449 trans_start = txq->trans_start; in dev_watchdog()
450 if (netif_xmit_stopped(txq) && in dev_watchdog()
454 txq->trans_timeout++; in dev_watchdog()
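Lines 446-454 are the tx watchdog, dev_watchdog(), which walks every tx queue of the device; a queue counts as hung when it is stopped and its last transmit (txq->trans_start) is older than dev->watchdog_timeo. A sketch of the loop:

    unsigned int i;

    for (i = 0; i < dev->num_tx_queues; i++) {
            struct netdev_queue *txq;
            unsigned long trans_start;

            txq = netdev_get_tx_queue(dev, i);
            trans_start = txq->trans_start;
            if (netif_xmit_stopped(txq) &&
                time_after(jiffies,
                           trans_start + dev->watchdog_timeo)) {
                    txq->trans_timeout++;  /* surfaced via ethtool stats */
                    break;  /* one hung queue is enough; the caller then
                             * invokes the driver's ndo_tx_timeout */
            }
    }
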
1025 struct netdev_queue *txq; in attach_default_qdiscs() local
1028 txq = netdev_get_tx_queue(dev, 0); in attach_default_qdiscs()
1033 dev->qdisc = txq->qdisc_sleeping; in attach_default_qdiscs()
1036 qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); in attach_default_qdiscs()
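Lines 1025-1036 are attach_default_qdiscs(). Queue 0 serves as the anchor: on a single-queue (or IFF_NO_QUEUE) device the root qdisc is simply that queue's qdisc_sleeping, while a true multiqueue device gets an mq root qdisc (TC_H_ROOT) that fans out per-queue children. Condensed, under the same assumptions:

    txq = netdev_get_tx_queue(dev, 0);

    if (!netif_is_multiqueue(dev) ||
        dev->priv_flags & IFF_NO_QUEUE) {
            netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
            dev->qdisc = txq->qdisc_sleeping;
            qdisc_refcount_inc(dev->qdisc);
    } else {
            qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
            if (qdisc) {
                    dev->qdisc = qdisc;
                    qdisc->ops->attach(qdisc);
            }
    }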