Lines Matching refs:txq

53 const struct netdev_queue *txq = q->dev_queue; in __skb_dequeue_bad_txq() local
65 txq = skb_get_tx_queue(txq->dev, skb); in __skb_dequeue_bad_txq()
66 if (!netif_xmit_frozen_or_stopped(txq)) { in __skb_dequeue_bad_txq()
154 const struct netdev_queue *txq, in try_bulk_dequeue_skb() argument
157 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; in try_bulk_dequeue_skb()
205 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb() local
233 txq = skb_get_tx_queue(txq->dev, skb); in dequeue_skb()
234 if (!netif_xmit_frozen_or_stopped(txq)) { in dequeue_skb()
254 netif_xmit_frozen_or_stopped(txq)) in dequeue_skb()
267 try_bulk_dequeue_skb(q, skb, txq, packets); in dequeue_skb()
272 trace_qdisc_dequeue(q, txq, *packets, skb); in dequeue_skb()
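
The dequeue_skb() matches above trace the bulk-dequeue gate: a requeued skb has its txq re-resolved from its own queue mapping, dequeue is deferred while the hardware queue is frozen or stopped, and otherwise extra skbs are bulked before the dequeue is traced. A condensed sketch of that flow follows; lines not present in the matches (the early-return condition, the requeue-list handling) are filled in from the surrounding sch_generic.c context and may differ in the revision being indexed.

    /* Condensed sketch of dequeue_skb(); only the txq-related steps from the
     * matches above (205, 233-234, 254, 267, 272) are kept. */
    const struct netdev_queue *txq = q->dev_queue;

    if (unlikely(skb)) {
            /* previously requeued skb: re-resolve its txq and retry only
             * once the hardware queue can accept it again */
            txq = skb_get_tx_queue(txq->dev, skb);
            if (!netif_xmit_frozen_or_stopped(txq))
                    skb = __skb_dequeue(&q->gso_skb);  /* take it off the requeue list */
            else
                    skb = NULL;
            return skb;
    }

    if ((q->flags & TCQ_F_ONETXQUEUE) &&
        netif_xmit_frozen_or_stopped(txq))
            return skb;                        /* single txq stalled, nothing to do */

    skb = q->dequeue(q);
    if (skb && qdisc_may_bulk(q))
            try_bulk_dequeue_skb(q, skb, txq, packets);

    trace_qdisc_dequeue(q, txq, *packets, skb);
    return skb;
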
286 struct net_device *dev, struct netdev_queue *txq, in sch_direct_xmit() argument
311 HARD_TX_LOCK(dev, txq, smp_processor_id()); in sch_direct_xmit()
312 if (!netif_xmit_frozen_or_stopped(txq)) in sch_direct_xmit()
313 skb = dev_hard_start_xmit(skb, dev, txq, &ret); in sch_direct_xmit()
315 HARD_TX_UNLOCK(dev, txq); in sch_direct_xmit()
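
The sch_direct_xmit() matches at 311-315 are the hand-off to the driver itself: take the per-queue HARD_TX_LOCK, skip the transmit while the queue is frozen or stopped, and drop the lock. Reassembled into one fragment (return-code handling and the requeue path omitted):

    /* Reassembled from the sch_direct_xmit() matches at 311-315. */
    HARD_TX_LOCK(dev, txq, smp_processor_id());
    if (!netif_xmit_frozen_or_stopped(txq))
            skb = dev_hard_start_xmit(skb, dev, txq, &ret);
    HARD_TX_UNLOCK(dev, txq);
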
360 struct netdev_queue *txq; in qdisc_restart() local
374 txq = skb_get_tx_queue(dev, skb); in qdisc_restart()
376 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); in qdisc_restart()
432 struct netdev_queue *txq; in dev_watchdog() local
434 txq = netdev_get_tx_queue(dev, i); in dev_watchdog()
435 trans_start = txq->trans_start; in dev_watchdog()
436 if (netif_xmit_stopped(txq) && in dev_watchdog()
440 txq->trans_timeout++; in dev_watchdog()
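
The dev_watchdog() matches (432-440) come from the per-queue stall scan: each txq's trans_start is checked against the watchdog timeout while the queue is stopped, and trans_timeout is bumped for a stalled queue. A condensed sketch; the loop bounds, the time_after() test and the some_queue_timedout flag are filled in from surrounding context and may differ in the indexed revision.

    /* Sketch of the dev_watchdog() scan around lines 432-440. */
    for (i = 0; i < dev->num_tx_queues; i++) {
            struct netdev_queue *txq;

            txq = netdev_get_tx_queue(dev, i);
            trans_start = txq->trans_start;
            if (netif_xmit_stopped(txq) &&
                time_after(jiffies, trans_start + dev->watchdog_timeo)) {
                    some_queue_timedout = 1;   /* flag name assumed from sch_generic.c */
                    txq->trans_timeout++;
                    break;
            }
    }
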
1056 struct netdev_queue *txq; in attach_default_qdiscs() local
1059 txq = netdev_get_tx_queue(dev, 0); in attach_default_qdiscs()
1064 dev->qdisc = txq->qdisc_sleeping; in attach_default_qdiscs()
1067 qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); in attach_default_qdiscs()
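
Finally, the attach_default_qdiscs() matches (1056-1067) show txq at device setup time: queue 0 is looked up, a single-queue (or IFF_NO_QUEUE) device promotes that queue's sleeping qdisc to dev->qdisc, and a multiqueue device gets an mq root qdisc created against the same txq. A condensed sketch; the branch condition and the refcount/attach calls are filled in from surrounding context rather than the matches, and may not correspond to the exact revision.

    /* Sketch of attach_default_qdiscs() around lines 1056-1067. */
    txq = netdev_get_tx_queue(dev, 0);

    if (!netif_is_multiqueue(dev) || dev->priv_flags & IFF_NO_QUEUE) {
            netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
            dev->qdisc = txq->qdisc_sleeping;
            qdisc_refcount_inc(dev->qdisc);
    } else {
            qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
            if (qdisc) {
                    dev->qdisc = qdisc;
                    qdisc->ops->attach(qdisc);
            }
    }
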