Lines Matching full:tx

47 q->tx.shmem = (void *)tx_shmem; in eth_ivshmem_queue_init()
51 vring_init(&q->tx.vring, vring_desc_len, q->tx.shmem, ETH_IVSHMEM_VRING_ALIGNMENT); in eth_ivshmem_queue_init()
58 struct vring_used *tmp_used = q->tx.vring.used; in eth_ivshmem_queue_init()
60 q->tx.vring.used = q->rx.vring.used; in eth_ivshmem_queue_init()
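
These first matches appear to come from Zephyr's ivshmem Ethernet queue setup (eth_ivshmem_queue_init()). A minimal sketch of the flow around them, assuming the driver's struct layout and the standard vring_init() signature; the rx lines are inferred mirrors of the matched tx lines, not part of the match output:

    /* Sketch, not the verbatim driver; names are taken from the matches. */
    q->tx.shmem = (void *)tx_shmem;
    q->rx.shmem = (void *)rx_shmem;   /* assumed mirror of the matched TX line */

    vring_init(&q->tx.vring, vring_desc_len, q->tx.shmem, ETH_IVSHMEM_VRING_ALIGNMENT);
    vring_init(&q->rx.vring, vring_desc_len, q->rx.shmem, ETH_IVSHMEM_VRING_ALIGNMENT);

    /* Swap the "used" pointers: with ivshmem each peer can only write its
     * own shared-memory section, so the used ring a peer updates while
     * consuming the remote TX vring must live in the section it owns. */
    struct vring_used *tmp_used = q->tx.vring.used;
    q->tx.vring.used = q->rx.vring.used;
    q->rx.vring.used = tmp_used;

The swap is the one place this layout departs from a plain virtio vring: producer and consumer sides of each ring are split across the two owned regions.
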
70 q->tx.desc_head = 0; in eth_ivshmem_queue_reset()
71 q->tx.desc_len = 0; in eth_ivshmem_queue_reset()
72 q->tx.data_head = 0; in eth_ivshmem_queue_reset()
73 q->tx.data_tail = 0; in eth_ivshmem_queue_reset()
74 q->tx.data_len = 0; in eth_ivshmem_queue_reset()
75 q->tx.avail_idx = 0; in eth_ivshmem_queue_reset()
76 q->tx.used_idx = 0; in eth_ivshmem_queue_reset()
77 q->tx.pending_data_head = 0; in eth_ivshmem_queue_reset()
78 q->tx.pending_data_len = 0; in eth_ivshmem_queue_reset()
82 memset(q->tx.shmem, 0, q->vring_header_size); in eth_ivshmem_queue_reset()
84 /* Init TX ring descriptors */ in eth_ivshmem_queue_reset()
85 for (unsigned int i = 0; i < q->tx.vring.num - 1; i++) { in eth_ivshmem_queue_reset()
86 q->tx.vring.desc[i].next = i + 1; in eth_ivshmem_queue_reset()
88 q->tx.vring.desc[q->tx.vring.num - 1].next = 0; in eth_ivshmem_queue_reset()
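
eth_ivshmem_queue_reset() zeroes the TX bookkeeping and the shared vring header, then strings the descriptors into a circular free chain. The chain-building idiom from the matched loop, as a self-contained sketch (demo_desc is a stand-in type, not the driver's, and num must be at least 1):

    #include <stdint.h>

    struct demo_desc {
        uint16_t next;   /* index of the next descriptor in the chain */
    };

    /* Link desc[0] -> desc[1] -> ... -> desc[num - 1] -> desc[0], exactly
     * as the reset loop does: each slot points at its successor and the
     * last slot wraps back to 0. */
    static void link_desc_chain(struct demo_desc *desc, unsigned int num)
    {
        for (unsigned int i = 0; i < num - 1; i++) {
            desc[i].next = i + 1;
        }
        desc[num - 1].next = 0;
    }

Pre-linking at reset means the TX path can consume descriptors in ring order without re-writing the next fields per packet.
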
93 /* Clean used TX buffers */ in eth_ivshmem_queue_tx_get_buff()
100 if (q->tx.desc_len >= q->desc_max_len) { in eth_ivshmem_queue_tx_get_buff()
104 uint32_t head = q->tx.data_head; in eth_ivshmem_queue_tx_get_buff()
108 if (q->vring_data_max_len - q->tx.data_len < consumed_len) { in eth_ivshmem_queue_tx_get_buff()
112 struct vring_desc *tx_desc = &q->tx.vring.desc[q->tx.desc_head]; in eth_ivshmem_queue_tx_get_buff()
119 *data = (uint8_t *)q->tx.shmem + q->vring_header_size + head; in eth_ivshmem_queue_tx_get_buff()
121 q->tx.pending_data_head = new_head; in eth_ivshmem_queue_tx_get_buff()
122 q->tx.pending_data_len = q->tx.data_len + consumed_len; in eth_ivshmem_queue_tx_get_buff()
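
eth_ivshmem_queue_tx_get_buff() is the reserve half of a two-phase scheme: reclaim finished buffers, check descriptor and data-area capacity, hand the caller a pointer into shared memory, and park the new position in pending_data_head/pending_data_len without publishing anything. A sketch of that flow, assuming the driver's types and headers; the elided lines between matches 104 and 108 presumably compute the wrapped buffer position, so the wrap rule below is a guess, as is the descriptor fill:

    /* Fragment: assumes struct eth_ivshmem_queue and the vring types
     * from the driver's headers, plus errno.h for -ENOBUFS. */
    int tx_get_buff_sketch(struct eth_ivshmem_queue *q, void **data, size_t len)
    {
        tx_clean_used(q);                        /* reclaim completed TX first */

        if (q->tx.desc_len >= q->desc_max_len) {
            return -ENOBUFS;                     /* descriptor table full */
        }

        uint32_t head = q->tx.data_head;
        uint32_t consumed_len = len;
        /* Guessed wrap rule: if the buffer does not fit before the end of
         * the data area, waste the tail bytes and restart at offset 0. */
        if (head + len > q->vring_data_max_len) {
            consumed_len += q->vring_data_max_len - head;
            head = 0;
        }

        if (q->vring_data_max_len - q->tx.data_len < consumed_len) {
            return -ENOBUFS;                     /* data area full */
        }

        struct vring_desc *tx_desc = &q->tx.vring.desc[q->tx.desc_head];
        tx_desc->addr = q->vring_header_size + head;   /* offset into shmem */
        tx_desc->len = len;

        *data = (uint8_t *)q->tx.shmem + q->vring_header_size + head;

        /* Nothing shared is published yet; that happens at commit. */
        q->tx.pending_data_head = head + len;
        q->tx.pending_data_len = q->tx.data_len + consumed_len;
        return 0;
    }

Because the reservation only touches local state, a caller that aborts after reserving leaves the queue unchanged.
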
129 /* Ensure that a TX buffer is pending */ in eth_ivshmem_queue_tx_commit_buff()
130 if (q->tx.pending_data_len == 0) { in eth_ivshmem_queue_tx_commit_buff()
134 uint16_t desc_head = q->tx.desc_head; in eth_ivshmem_queue_tx_commit_buff()
136 q->tx.desc_len++; in eth_ivshmem_queue_tx_commit_buff()
137 q->tx.desc_head = (q->tx.desc_head + 1) % q->desc_max_len; in eth_ivshmem_queue_tx_commit_buff()
139 q->tx.data_head = q->tx.pending_data_head; in eth_ivshmem_queue_tx_commit_buff()
140 q->tx.data_len = q->tx.pending_data_len; in eth_ivshmem_queue_tx_commit_buff()
142 q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len] = desc_head; in eth_ivshmem_queue_tx_commit_buff()
144 VRING_FLUSH(q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len]); in eth_ivshmem_queue_tx_commit_buff()
147 q->tx.avail_idx++; in eth_ivshmem_queue_tx_commit_buff()
148 q->tx.vring.avail->idx = q->tx.avail_idx; in eth_ivshmem_queue_tx_commit_buff()
150 VRING_FLUSH(q->tx.vring.avail->idx); in eth_ivshmem_queue_tx_commit_buff()
152 q->tx.pending_data_len = 0; in eth_ivshmem_queue_tx_commit_buff()
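
eth_ivshmem_queue_tx_commit_buff() then publishes: promote the pending head/len, advance the local descriptor ring, and only afterwards expose the buffer through the avail ring. The ordering matters: the ring slot is flushed before avail->idx is bumped and flushed, so the peer can never read a fresh index that points at a stale entry. Reassembled from the matched lines above (the return type and the unmatched lines in between are assumptions):

    void tx_commit_sketch(struct eth_ivshmem_queue *q)
    {
        /* Ensure that a TX buffer is pending */
        if (q->tx.pending_data_len == 0) {
            return;
        }

        uint16_t desc_head = q->tx.desc_head;

        q->tx.desc_len++;
        q->tx.desc_head = (q->tx.desc_head + 1) % q->desc_max_len;

        q->tx.data_head = q->tx.pending_data_head;
        q->tx.data_len = q->tx.pending_data_len;

        /* Slot first... */
        q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len] = desc_head;
        VRING_FLUSH(q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len]);

        /* ...index second, so the peer observes them in that order. */
        q->tx.avail_idx++;
        q->tx.vring.avail->idx = q->tx.avail_idx;
        VRING_FLUSH(q->tx.vring.avail->idx);

        q->tx.pending_data_len = 0;   /* reservation consumed */
    }
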
270 VRING_INVALIDATE(q->tx.vring.used->idx); in tx_clean_used()
271 if (q->tx.used_idx == q->tx.vring.used->idx) { in tx_clean_used()
275 struct vring_used_elem *used = &q->tx.vring.used->ring[ in tx_clean_used()
276 q->tx.used_idx % q->desc_max_len]; in tx_clean_used()
285 struct vring_desc *desc = &q->tx.vring.desc[used->id]; in tx_clean_used()
290 uint32_t tail = q->tx.data_tail; in tx_clean_used()
294 if (consumed_len > q->tx.data_len || in tx_clean_used()
299 q->tx.data_tail = new_tail; in tx_clean_used()
300 q->tx.data_len -= consumed_len; in tx_clean_used()
301 q->tx.desc_len--; in tx_clean_used()
302 q->tx.used_idx++; in tx_clean_used()
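
Finally, tx_clean_used() drains completions: it invalidates the peer-written used->idx, and for every new used element reclaims the buffer's bytes from the tail of the data area and returns the descriptor. A sketch of the loop implied by the matches; the validation and wrap details are guessed where the listing elides them (the || on matched line 294 continues onto an unmatched line):

    static void tx_clean_used_sketch(struct eth_ivshmem_queue *q)
    {
        for (;;) {
            VRING_INVALIDATE(q->tx.vring.used->idx);
            if (q->tx.used_idx == q->tx.vring.used->idx) {
                break;                     /* nothing newly completed */
            }

            struct vring_used_elem *used = &q->tx.vring.used->ring[
                    q->tx.used_idx % q->desc_max_len];
            VRING_INVALIDATE(*used);       /* assumed: elem is peer-written too */

            struct vring_desc *desc = &q->tx.vring.desc[used->id];

            uint32_t tail = q->tx.data_tail;
            uint32_t consumed_len = desc->len;   /* plus wrap padding in the driver */
            uint32_t new_tail = (tail + consumed_len) % q->vring_data_max_len;

            if (consumed_len > q->tx.data_len /* || further sanity checks */) {
                break;                     /* inconsistent completion; stop */
            }

            q->tx.data_tail = new_tail;
            q->tx.data_len -= consumed_len;
            q->tx.desc_len--;
            q->tx.used_idx++;
        }
    }

Since the data area is consumed strictly FIFO (data_tail chases data_head), freeing is pure index arithmetic; no per-buffer free list is needed.
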