Lines Matching +full:tx +full:- +full:rx +full:- +full:swap

/* SPDX-License-Identifier: Apache-2.0 */
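
/*
 * Queue setup fragment: the code below records the ring geometry, points
 * the TX and RX halves at their shared-memory sections and initialises a
 * vring in each, then swaps the "used" ring pointers. The apparent intent
 * of the swap is that each peer publishes completions in the section it
 * owns while reading the other side's completions from the peer section.
 */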
/* in eth_ivshmem_queue_init() */
q->desc_max_len = vring_desc_len;
q->vring_data_max_len = shmem_section_size - vring_header_size;
q->vring_header_size = vring_header_size;
q->tx.shmem = (void *)tx_shmem;
q->rx.shmem = (void *)rx_shmem;

vring_init(&q->tx.vring, vring_desc_len, q->tx.shmem, ETH_IVSHMEM_VRING_ALIGNMENT);
vring_init(&q->rx.vring, vring_desc_len, q->rx.shmem, ETH_IVSHMEM_VRING_ALIGNMENT);

/* Swap "used" pointers. */
struct vring_used *tmp_used = q->tx.vring.used;

q->tx.vring.used = q->rx.vring.used;
q->rx.vring.used = tmp_used;
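
/*
 * Reset fragment: all TX/RX bookkeeping (descriptor and data heads/tails,
 * lengths, avail/used indices, pending state) is zeroed, the local vring
 * header area is cleared, and the TX descriptors are linked through their
 * next fields into a closed chain.
 */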
/* in eth_ivshmem_queue_reset() */
q->tx.desc_head = 0;
q->tx.desc_len = 0;
q->tx.data_head = 0;
q->tx.data_tail = 0;
q->tx.data_len = 0;
q->tx.avail_idx = 0;
q->tx.used_idx = 0;
q->tx.pending_data_head = 0;
q->tx.pending_data_len = 0;
q->rx.avail_idx = 0;
q->rx.used_idx = 0;

memset(q->tx.shmem, 0, q->vring_header_size);

/* Init TX ring descriptors */
for (unsigned int i = 0; i < q->tx.vring.num - 1; i++) {
	q->tx.vring.desc[i].next = i + 1;
}
q->tx.vring.desc[q->tx.vring.num - 1].next = 0;
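
/*
 * TX allocation fragment: after reclaiming completed buffers, the function
 * bails out with -ENOBUFS when every descriptor is already in flight or
 * when the data area cannot hold the length computed by tx_buffer_advance().
 * Otherwise it fills the next free descriptor with the buffer's offset and
 * length, hands the caller a pointer into the TX data area, and parks the
 * advanced head as pending until commit.
 */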
/* in eth_ivshmem_queue_tx_get_buff() */
/* Clean used TX buffers */

if (q->tx.desc_len >= q->desc_max_len) {
	return -ENOBUFS;
}

uint32_t head = q->tx.data_head;

uint32_t new_head = tx_buffer_advance(q->vring_data_max_len, &head, &consumed_len);

if (q->vring_data_max_len - q->tx.data_len < consumed_len) {
	return -ENOBUFS;
}

struct vring_desc *tx_desc = &q->tx.vring.desc[q->tx.desc_head];

tx_desc->addr = q->vring_header_size + head;
tx_desc->len = len;
tx_desc->flags = 0;

*data = (uint8_t *)q->tx.shmem + q->vring_header_size + head;

q->tx.pending_data_head = new_head;
q->tx.pending_data_len = q->tx.data_len + consumed_len;
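
/*
 * TX commit fragment: committing promotes the pending head/length from the
 * allocation step, advances the local descriptor head, and publishes the
 * descriptor through the avail ring. Note the ordering: the ring slot is
 * written and flushed before the avail index is bumped and flushed, so the
 * peer should never observe an index pointing at a stale slot.
 */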
/* in eth_ivshmem_queue_tx_commit_buff() */
/* Ensure that a TX buffer is pending */
if (q->tx.pending_data_len == 0) {
	return -EINVAL;
}

uint16_t desc_head = q->tx.desc_head;

q->tx.desc_len++;
q->tx.desc_head = (q->tx.desc_head + 1) % q->desc_max_len;

q->tx.data_head = q->tx.pending_data_head;
q->tx.data_len = q->tx.pending_data_len;

q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len] = desc_head;

VRING_FLUSH(q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len]);

q->tx.avail_idx++;
q->tx.vring.avail->idx = q->tx.avail_idx;

VRING_FLUSH(q->tx.vring.avail->idx);

q->tx.pending_data_len = 0;
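
/*
 * RX fragment: the descriptor published by the peer is translated back into
 * an offset within the RX data area and validated so that neither the
 * offset, the length, nor their sum can fall outside the shared data region
 * (the third check also avoids unsigned wrap-around). The caller receives a
 * pointer into the peer's section along with the descriptor length.
 */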
/* in eth_ivshmem_queue_rx() */
struct vring_desc *desc = &q->rx.vring.desc[avail_desc_idx];

uint64_t offset = desc->addr - q->vring_header_size;
uint32_t rx_len = desc->len;

if (offset > q->vring_data_max_len ||
    rx_len > q->vring_data_max_len ||
    offset > q->vring_data_max_len - rx_len) {
	return -EINVAL;
}

*data = (uint8_t *)q->rx.shmem + q->vring_header_size + offset;
*len = desc->len;
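
/*
 * RX completion fragment: the consumed descriptor is handed back through
 * the used ring (entry written and flushed before the used index), and the
 * local avail index is mirrored into the avail-event field, in the usual
 * virtio event-index fashion, so the peer can throttle notifications.
 */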
/* in eth_ivshmem_queue_rx_complete() */
uint16_t used_idx = q->rx.used_idx % q->desc_max_len;

q->rx.used_idx++;
q->rx.vring.used->ring[used_idx].id = avail_desc_idx;
q->rx.vring.used->ring[used_idx].len = 1;
VRING_FLUSH(q->rx.vring.used->ring[used_idx]);

q->rx.vring.used->idx = q->rx.used_idx;
VRING_FLUSH(q->rx.vring.used->idx);

q->rx.avail_idx++;
vring_avail_event(&q->rx.vring) = q->rx.avail_idx;
VRING_FLUSH(vring_avail_event(&q->rx.vring));
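
/*
 * Sizing fragment: only the error returns and the data-area computation of
 * calc_vring_size() matched here. The space left after the vring header
 * becomes the data area; the surrounding, elided checks appear to reject
 * section sizes that cannot hold a valid ring.
 */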
/* in calc_vring_size() */
return -EINVAL;

uint32_t vring_data_size = section_size - header_size;

return -EINVAL;
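
/*
 * Helper fragment: tx_buffer_advance() starts from the contiguous space
 * left between the current position and the end of the circular data area;
 * judging by its call sites, it then advances the position (wrapping to
 * offset 0 when the request does not fit contiguously) and reports the
 * total space consumed.
 */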
/* in tx_buffer_advance() */
uint32_t contiguous_len = max_len - *position;
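
/*
 * TX reclaim fragment: tx_clean_used() re-reads the peer's used index to see
 * whether anything is left to reclaim, rejects used entries whose id or len
 * look malformed, and for each valid entry releases the matching stretch of
 * the TX data area by moving the tail through tx_buffer_advance() and
 * shrinking the descriptor and data accounting.
 */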
/* in tx_clean_used() */
VRING_INVALIDATE(q->tx.vring.used->idx);
if (q->tx.used_idx == q->tx.vring.used->idx) {
	/* ... */
}

struct vring_used_elem *used = &q->tx.vring.used->ring[
	q->tx.used_idx % q->desc_max_len];

if (used->id >= q->desc_max_len || used->len != 1) {
	return -EINVAL;
}

struct vring_desc *desc = &q->tx.vring.desc[used->id];

uint64_t offset = desc->addr - q->vring_header_size;
uint32_t len = desc->len;

uint32_t tail = q->tx.data_tail;

uint32_t new_tail = tx_buffer_advance(q->vring_data_max_len, &tail, &consumed_len);

if (consumed_len > q->tx.data_len ||
    /* ... elided condition ... */) {
	return -EINVAL;
}

q->tx.data_tail = new_tail;
q->tx.data_len -= consumed_len;
q->tx.desc_len--;
q->tx.used_idx++;
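
/*
 * RX polling fragment: get_rx_avail_desc_idx() invalidates and re-reads the
 * peer's avail index, returning -EWOULDBLOCK when nothing new has been
 * published; otherwise it fetches the next avail ring slot and bounds-checks
 * the descriptor index before handing it back.
 */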
/* in get_rx_avail_desc_idx() */
VRING_INVALIDATE(q->rx.vring.avail->idx);

uint16_t avail_idx = q->rx.vring.avail->idx;

if (avail_idx == q->rx.avail_idx) {
	return -EWOULDBLOCK;
}

VRING_INVALIDATE(q->rx.vring.avail->ring[q->rx.avail_idx % q->desc_max_len]);
*avail_desc_idx = q->rx.vring.avail->ring[q->rx.avail_idx % q->desc_max_len];
if (*avail_desc_idx >= q->desc_max_len) {
	return -EINVAL;
}
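
/*
 * Usage sketch (illustrative only): the two-phase TX (get-buff then
 * commit-buff) and RX (rx then rx-complete) pattern suggested by the
 * fragments above. The prototypes are not part of this listing; the
 * declarations below are assumed from how q, data and len are used and
 * may not match the real driver header.
 */
#include <string.h>

struct eth_ivshmem_queue; /* assumed opaque here */
int eth_ivshmem_queue_tx_get_buff(struct eth_ivshmem_queue *q, void **data, size_t len);
int eth_ivshmem_queue_tx_commit_buff(struct eth_ivshmem_queue *q);
int eth_ivshmem_queue_rx(struct eth_ivshmem_queue *q, const void **data, size_t *len);
int eth_ivshmem_queue_rx_complete(struct eth_ivshmem_queue *q);

/* Hypothetical send helper: reserve space in the TX data area, copy the
 * frame, then publish it to the peer in a second step. */
static int example_send(struct eth_ivshmem_queue *q, const void *frame, size_t len)
{
	void *tx_buf;
	int res = eth_ivshmem_queue_tx_get_buff(q, &tx_buf, len);

	if (res != 0) {
		return res; /* e.g. -ENOBUFS when descriptors or data space run out */
	}
	memcpy(tx_buf, frame, len);
	return eth_ivshmem_queue_tx_commit_buff(q);
}

/* Hypothetical receive helper: peek at the next buffer published by the
 * peer, consume it, then return the descriptor via the used ring. */
static int example_receive(struct eth_ivshmem_queue *q,
			   void (*consume)(const void *data, size_t len))
{
	const void *rx_buf;
	size_t len;
	int res = eth_ivshmem_queue_rx(q, &rx_buf, &len);

	if (res != 0) {
		return res; /* -EWOULDBLOCK when the peer has published nothing new */
	}
	consume(rx_buf, len);
	return eth_ivshmem_queue_rx_complete(q);
}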