Lines matching refs: VHOST_NET_VQ_TX (drivers/vhost/net.c)
85 VHOST_NET_VQ_TX = 1, enumerator
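The hit at line 85 is the definition site. A reconstruction of the surrounding enum, assuming the usual RX/TX/MAX triple from mainline drivers/vhost/net.c (only the TX value is confirmed by this listing):

enum {
        VHOST_NET_VQ_RX = 0,    /* assumed sibling */
        VHOST_NET_VQ_TX = 1,    /* line 85: the symbol traced here */
        VHOST_NET_VQ_MAX = 2,   /* assumed sibling; sizes the per-queue arrays */
};

Every other hit below is this value used as an index into the per-device arrays (net->vqs[], n->poll) and, once, as a lockdep subclass.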
601 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in vhost_exceeds_maxpend()
770 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx_copy()
860 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx_zerocopy()
964 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; in handle_tx()
968 mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); in handle_tx()
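Lines 601, 770, 860 and 964 show the TX-path helpers all opening with the same array lookup. A sketch of handle_tx() around the hits at 964 and 968, body filled in from mainline as an assumption (vhost_vq_get_backend() is the current accessor; older trees read vq->private_data directly):

static void handle_tx(struct vhost_net *net)
{
        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];   /* 964 */
        struct vhost_virtqueue *vq = &nvq->vq;
        struct socket *sock;

        /* 968: the queue index doubles as the lockdep nesting level,
         * giving the TX and RX vq mutexes distinct subclasses. */
        mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
        sock = vhost_vq_get_backend(vq);        /* assumed accessor */
        if (!sock)
                goto out;

        if (vhost_sock_zcopy(sock))             /* assumed dispatch */
                handle_tx_zerocopy(net, sock);
        else
                handle_tx_copy(net, sock);
out:
        mutex_unlock(&vq->mutex);
}

Line 1013 hints at why the nested form is needed: vhost_net_rx_peek_head_len() reaches into the TX queue from the RX path, so the two vq mutexes can legitimately nest.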
1013 struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; in vhost_net_rx_peek_head_len()
1276 poll[VHOST_NET_VQ_TX].work); in handle_tx_net()
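The hit at 1276 is only the tail of a container_of() expression; the full statement in handle_tx_net() presumably reads:

static void handle_tx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_TX].work);  /* 1276 */

        handle_tx(net);
}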
1321 n->vqs[VHOST_NET_VQ_TX].xdp = xdp; in vhost_net_open()
1324 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; in vhost_net_open()
1326 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; in vhost_net_open()
1344 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); in vhost_net_open()
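The four vhost_net_open() hits (1321-1344) are the TX half of device setup; restoring the interleaved RX lines (an assumption based on mainline) makes the symmetry visible:

        /* inside vhost_net_open(); RX lines are assumed mirrors */
        n->vqs[VHOST_NET_VQ_TX].xdp = xdp;                          /* 1321 */
        vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;         /* 1324 */
        vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;         /* assumed */
        n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;    /* 1326 */
        n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;    /* assumed */
        /* ... intervening init omitted ... */
        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); /* 1344 */
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);  /* assumed */

Note the polarity: the TX side polls the backend socket for EPOLLOUT (writable again after backpressure), while the RX side polls for EPOLLIN (data to receive).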
1374 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); in vhost_net_stop()
1381 if (n->vqs[VHOST_NET_VQ_TX].ubufs) { in vhost_net_flush()
1382 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1384 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1386 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); in vhost_net_flush()
1387 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
1389 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); in vhost_net_flush()
1390 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); in vhost_net_flush()
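The dense run of hits at 1381-1390 is the zerocopy flush sequence in vhost_net_flush(). The gap lines (1383, 1385, 1388) contain no VHOST_NET_VQ_TX reference so the listing cannot show them; filled in from mainline as an assumption, the logic is: flag the flush under the vq mutex, wait for outstanding ubuf DMA references to drain, then re-arm the refcount at 1:

        if (n->vqs[VHOST_NET_VQ_TX].ubufs) {                         /* 1381 */
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);       /* 1382 */
                n->tx_flush = true;                                  /* assumed gap line */
                mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);     /* 1384 */
                /* Wait for all lower device DMAs done. */
                vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); /* 1386 */
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);       /* 1387 */
                n->tx_flush = false;                                 /* assumed gap line */
                atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);    /* 1389 */
                mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);     /* 1390 */
        }

Resetting the refcount to 1 re-arms the ubufs object so the TX path can keep taking references after the flush completes.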
1415 kfree(n->vqs[VHOST_NET_VQ_TX].xdp); in vhost_net_release()
1788 vhost_net_enable_zcopy(VHOST_NET_VQ_TX); in vhost_net_init()
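The single hit in vhost_net_init() at 1788 enables zerocopy on the TX queue only; a sketch, assuming the experimental_zcopytx module parameter and misc-device registration from mainline:

static int __init vhost_net_init(void)
{
        if (experimental_zcopytx)       /* assumed module param */
                vhost_net_enable_zcopy(VHOST_NET_VQ_TX);        /* 1788 */
        return misc_register(&vhost_net_misc);
}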