/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <string.h>
#include <openamp/virtio.h>
#include <openamp/virtqueue.h>
#include <metal/atomic.h>
#include <metal/log.h>
#include <metal/alloc.h>

/* Prototypes for internal functions. */
static void vq_ring_init(struct virtqueue *, void *, int);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_add_buffer(struct virtqueue *, struct vring_desc *,
				   uint16_t, struct virtqueue_buf *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);
static int vq_ring_must_notify(struct virtqueue *vq);
static void vq_ring_notify(struct virtqueue *vq);
static int virtqueue_nused(struct virtqueue *vq);
static int virtqueue_navail(struct virtqueue *vq);

/* Default implementation of P2V based on libmetal */
static inline void *virtqueue_phys_to_virt(struct virtqueue *vq,
					   metal_phys_addr_t phys)
{
	struct metal_io_region *io = vq->shm_io;

	return metal_io_phys_to_virt(io, phys);
}

/* Default implementation of V2P based on libmetal */
static inline metal_phys_addr_t virtqueue_virt_to_phys(struct virtqueue *vq,
						       void *buf)
{
	struct metal_io_region *io = vq->shm_io;

	return metal_io_virt_to_phys(io, buf);
}

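/*
 * Example: setting up a virtqueue over pre-allocated shared memory.
 * This is a minimal sketch: "vdev", "shm_ring_va", "rx_callback",
 * "vdev_notify" and "vq_mem" are hypothetical, application-provided
 * objects, and "vq_mem" must be large enough for the virtqueue and its
 * per-descriptor bookkeeping.
 *
 *   struct vring_alloc_info ring_info = {
 *           .vaddr = shm_ring_va,   // vring placed in shared memory
 *           .align = 16,            // vring alignment
 *           .num_descs = 256,       // must be a power of two
 *   };
 *
 *   status = virtqueue_create(vdev, 0, "rx_vq", &ring_info,
 *                             rx_callback, vdev_notify, vq_mem);
 */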
int virtqueue_create(struct virtio_device *virt_dev, unsigned short id,
		     const char *name, struct vring_alloc_info *ring,
		     void (*callback)(struct virtqueue *vq),
		     void (*notify)(struct virtqueue *vq),
		     struct virtqueue *vq)
{
	int status = VQUEUE_SUCCESS;

	VQ_PARAM_CHK(ring == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs == 0, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1), status,
		     ERROR_VRING_ALIGN);
	VQ_PARAM_CHK(vq == NULL, status, ERROR_NO_MEM);

	if (status == VQUEUE_SUCCESS) {
		vq->vq_dev = virt_dev;
		vq->vq_name = name;
		vq->vq_queue_index = id;
		vq->vq_nentries = ring->num_descs;
		vq->vq_free_cnt = vq->vq_nentries;
		vq->callback = callback;
		vq->notify = notify;

		/* Initialize vring control block in virtqueue. */
		vq_ring_init(vq, ring->vaddr, ring->align);
	}

	/*
	 * CACHE: nothing to be done here. Only desc.next is set up at this
	 * stage, but that is only written by the driver, so there is no need
	 * to flush it.
	 */

	return status;
}

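/*
 * Example: enqueuing a single device-readable buffer on the driver side.
 * This is a minimal sketch: "tx_buf" and "tx_len" are hypothetical
 * application data; here the buffer pointer also serves as the cookie
 * returned later by virtqueue_get_buffer().
 *
 *   struct virtqueue_buf vqbuf[1];
 *
 *   vqbuf[0].buf = tx_buf;
 *   vqbuf[0].len = tx_len;
 *   if (virtqueue_add_buffer(vq, vqbuf, 1, 0, tx_buf) == VQUEUE_SUCCESS)
 *           virtqueue_kick(vq);
 */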
int virtqueue_add_buffer(struct virtqueue *vq, struct virtqueue_buf *buf_list,
			 int readable, int writable, void *cookie)
{
	struct vq_desc_extra *dxp = NULL;
	int status = VQUEUE_SUCCESS;
	uint16_t head_idx;
	uint16_t idx;
	int needed;

	needed = readable + writable;

	VQ_PARAM_CHK(vq == NULL, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(needed < 1, status, ERROR_VQUEUE_INVLD_PARAM);
	VQ_PARAM_CHK(vq->vq_free_cnt < needed, status, ERROR_VRING_FULL);

	VQUEUE_BUSY(vq);

	if (status == VQUEUE_SUCCESS) {
		VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");

		head_idx = vq->vq_desc_head_idx;
		VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
		dxp = &vq->vq_descx[head_idx];

		VQASSERT(vq, dxp->cookie == NULL,
			 "cookie already exists for index");

		dxp->cookie = cookie;
		dxp->ndescs = needed;

		/* Enqueue buffer onto the ring. */
		idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx,
					 buf_list, readable, writable);

		vq->vq_desc_head_idx = idx;
		vq->vq_free_cnt -= needed;

		if (vq->vq_free_cnt == 0) {
			VQ_RING_ASSERT_CHAIN_TERM(vq);
		} else {
			VQ_RING_ASSERT_VALID_IDX(vq, idx);
		}

		/*
		 * Update the vring_avail control block fields so that the
		 * other side can get the buffer using it.
		 */
		vq_ring_update_avail(vq, head_idx);
	}

	VQUEUE_IDLE(vq);

	return status;
}

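/*
 * Example: draining completed buffers on the driver side, typically from
 * the virtqueue callback. This is a minimal sketch; "handle_completion"
 * is a hypothetical application handler and the cookie is whatever was
 * passed to virtqueue_add_buffer().
 *
 *   uint32_t len;
 *   void *cookie;
 *
 *   while ((cookie = virtqueue_get_buffer(vq, &len, NULL)) != NULL)
 *           handle_completion(cookie, len);
 */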
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (!vq)
		return NULL;

	/* Used.idx is updated by the virtio device, so we need to invalidate */
	VRING_INVALIDATE(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return NULL;

	VQUEUE_BUSY(vq);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	atomic_thread_fence(memory_order_seq_cst);

	/* Used.ring is written by remote, invalidate it */
	VRING_INVALIDATE(&vq->vq_ring.used->ring[used_idx],
			 sizeof(vq->vq_ring.used->ring[used_idx]));

	desc_idx = (uint16_t)uep->id;
	if (len)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	vq->vq_descx[desc_idx].cookie = NULL;

	if (idx)
		*idx = used_idx;
	VQUEUE_IDLE(vq);

	return cookie;
}

uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
	/* Invalidate the desc entry written by driver before accessing it */
	VRING_INVALIDATE(&vq->vq_ring.desc[idx].len,
			 sizeof(vq->vq_ring.desc[idx].len));
	return vq->vq_ring.desc[idx].len;
}

void *virtqueue_get_buffer_addr(struct virtqueue *vq, uint16_t idx)
{
	/* Invalidate the desc entry written by driver before accessing it */
	VRING_INVALIDATE(&vq->vq_ring.desc[idx].addr,
			 sizeof(vq->vq_ring.desc[idx].addr));
	return virtqueue_phys_to_virt(vq, vq->vq_ring.desc[idx].addr);
}

void virtqueue_free(struct virtqueue *vq)
{
	if (vq) {
		if (vq->vq_free_cnt != vq->vq_nentries) {
			metal_log(METAL_LOG_WARNING,
				  "%s: freeing non-empty virtqueue\r\n",
				  vq->vq_name);
		}

		metal_free_memory(vq);
	}
}

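/*
 * Example: device-side handling of one buffer made available by the
 * driver, returning it through the used ring afterwards. This is a
 * minimal sketch; "process_request" is a hypothetical application
 * handler.
 *
 *   uint16_t idx;
 *   uint32_t len;
 *   void *buf;
 *
 *   buf = virtqueue_get_available_buffer(vq, &idx, &len);
 *   if (buf) {
 *           process_request(buf, len);
 *           virtqueue_add_consumed_buffer(vq, idx, len);
 *           virtqueue_kick(vq);
 *   }
 */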
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
				     uint32_t *len)
{
	uint16_t head_idx = 0;
	void *buffer;

	atomic_thread_fence(memory_order_seq_cst);

	/* Avail.idx is updated by driver, invalidate it */
	VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));
	if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
		return NULL;
	}

	VQUEUE_BUSY(vq);

	head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);

	/* Avail.ring is updated by driver, invalidate it */
	VRING_INVALIDATE(&vq->vq_ring.avail->ring[head_idx],
			 sizeof(vq->vq_ring.avail->ring[head_idx]));
	*avail_idx = vq->vq_ring.avail->ring[head_idx];

	buffer = virtqueue_get_buffer_addr(vq, *avail_idx);
	*len = virtqueue_get_buffer_length(vq, *avail_idx);

	VQUEUE_IDLE(vq);

	return buffer;
}

int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
				  uint32_t len)
{
	struct vring_used_elem *used_desc = NULL;
	uint16_t used_idx;

	if (head_idx >= vq->vq_nentries) {
		return ERROR_VRING_NO_BUFF;
	}

	VQUEUE_BUSY(vq);

	/* CACHE: used is never written by driver, so it's safe to directly access it */
	used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1);
	used_desc = &vq->vq_ring.used->ring[used_idx];
	used_desc->id = head_idx;
	used_desc->len = len;

	/* We still need to flush it because this is read by driver */
	VRING_FLUSH(&vq->vq_ring.used->ring[used_idx],
		    sizeof(vq->vq_ring.used->ring[used_idx]));

	atomic_thread_fence(memory_order_seq_cst);

	vq->vq_ring.used->idx++;

	/* Used.idx is read by driver, so we need to flush it */
	VRING_FLUSH(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;

	VQUEUE_IDLE(vq);

	return VQUEUE_SUCCESS;
}

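/*
 * Example: driver-side processing loop that re-enables callbacks without
 * losing events. This is a minimal sketch following the usual virtio
 * pattern, not code taken from this library; "handle_completion" is a
 * hypothetical handler. virtqueue_enable_cb() returns non-zero when used
 * buffers are already pending, so the queue is drained again.
 *
 *   do {
 *           void *cookie;
 *           uint32_t len;
 *
 *           while ((cookie = virtqueue_get_buffer(vq, &len, NULL)) != NULL)
 *                   handle_completion(cookie, len);
 *   } while (virtqueue_enable_cb(vq));
 */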
int virtqueue_enable_cb(struct virtqueue *vq)
{
	return vq_ring_enable_interrupt(vq, 0);
}

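/*
 * virtqueue_disable_cb
 *
 * Suppress notifications from the other side. With VIRTIO_RING_F_EVENT_IDX
 * this is done by publishing an event index a whole ring behind the current
 * consumer index so it will not be matched; otherwise the
 * NO_INTERRUPT/NO_NOTIFY flag is set. This is only a hint: a notification
 * may still arrive after this call returns.
 */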
void virtqueue_disable_cb(struct virtqueue *vq)
{
	VQUEUE_BUSY(vq);

	if (vq->vq_dev->features & VIRTIO_RING_F_EVENT_IDX) {
		if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
			vring_used_event(&vq->vq_ring) =
			    vq->vq_used_cons_idx - vq->vq_nentries - 1;
			VRING_FLUSH(&vring_used_event(&vq->vq_ring),
				    sizeof(vring_used_event(&vq->vq_ring)));
		}
		if (VIRTIO_ROLE_IS_DEVICE(vq->vq_dev)) {
			vring_avail_event(&vq->vq_ring) =
			    vq->vq_available_idx - vq->vq_nentries - 1;
			VRING_FLUSH(&vring_avail_event(&vq->vq_ring),
				    sizeof(vring_avail_event(&vq->vq_ring)));
		}
	} else {
		if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
			vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
			VRING_FLUSH(&vq->vq_ring.avail->flags,
				    sizeof(vq->vq_ring.avail->flags));
		}
		if (VIRTIO_ROLE_IS_DEVICE(vq->vq_dev)) {
			vq->vq_ring.used->flags |= VRING_USED_F_NO_NOTIFY;
			VRING_FLUSH(&vq->vq_ring.used->flags,
				    sizeof(vq->vq_ring.used->flags));
		}
	}

	VQUEUE_IDLE(vq);
}

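/*
 * virtqueue_kick
 *
 * Notify the other side that new buffers have been queued. The
 * notification is skipped when the peer has suppressed it, either through
 * the NO_NOTIFY/NO_INTERRUPT flags or, with VIRTIO_RING_F_EVENT_IDX,
 * through the published event index; see vq_ring_must_notify() below.
 */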
void virtqueue_kick(struct virtqueue *vq)
{
	VQUEUE_BUSY(vq);

	/* Ensure updated avail->idx is visible to host. */
	atomic_thread_fence(memory_order_seq_cst);

	if (vq_ring_must_notify(vq))
		vq_ring_notify(vq);

	vq->vq_queued_cnt = 0;

	VQUEUE_IDLE(vq);
}

void virtqueue_dump(struct virtqueue *vq)
{
	if (!vq)
		return;

	/* Avail and used live in shared memory, invalidate the fields we read */
	VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));
	VRING_INVALIDATE(&vq->vq_ring.avail->flags, sizeof(vq->vq_ring.avail->flags));
	VRING_INVALIDATE(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));
	VRING_INVALIDATE(&vq->vq_ring.used->flags, sizeof(vq->vq_ring.used->flags));

	metal_log(METAL_LOG_DEBUG,
		  "VQ: %s - size=%d; free=%d; queued=%d; desc_head_idx=%d; "
		  "available_idx=%d; avail.idx=%d; used_cons_idx=%d; "
		  "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
		  vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
		  vq->vq_queued_cnt, vq->vq_desc_head_idx, vq->vq_available_idx,
		  vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
		  vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
		  vq->vq_ring.used->flags);
}

uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
{
	uint16_t head_idx = 0;
	uint16_t avail_idx = 0;
	uint32_t len = 0;

	/* Avail.idx is updated by driver, invalidate it */
	VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));

	if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
		return 0;
	}

	VQUEUE_BUSY(vq);

	head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);

	/* Avail.ring is updated by driver, invalidate it */
	VRING_INVALIDATE(&vq->vq_ring.avail->ring[head_idx],
			 sizeof(vq->vq_ring.avail->ring[head_idx]));
	avail_idx = vq->vq_ring.avail->ring[head_idx];

	/* Invalidate the desc entry written by driver before accessing it */
	VRING_INVALIDATE(&vq->vq_ring.desc[avail_idx].len,
			 sizeof(vq->vq_ring.desc[avail_idx].len));

	len = vq->vq_ring.desc[avail_idx].len;

	VQUEUE_IDLE(vq);

	return len;
}

/**************************************************************************
 *                            Helper Functions                            *
 **************************************************************************/

/*
 *
 * vq_ring_add_buffer
 *
 */
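/*
 * Builds the descriptor chain for buf_list starting at head_idx:
 * device-readable entries are linked first, then device-writable ones,
 * with VRING_DESC_F_NEXT set on every entry except the last. Returns the
 * index following the chain, i.e. the new head of the free descriptor
 * list.
 */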
static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
				   struct vring_desc *desc, uint16_t head_idx,
				   struct virtqueue_buf *buf_list, int readable,
				   int writable)
{
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	(void)vq;

	needed = readable + writable;

	for (i = 0, idx = head_idx; i < needed; i++, idx = dp->next) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
			 "premature end of free desc chain");

		/* CACHE: No need to invalidate desc because it is only written by driver */
		dp = &desc[idx];
		dp->addr = virtqueue_virt_to_phys(vq, buf_list[i].buf);
		dp->len = buf_list[i].len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;

		/*
		 * Readable buffers are inserted into vring before the
		 * writable buffers.
		 */
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;

		/*
		 * Instead of flushing the whole desc region, we flush only the
		 * single entry hopefully saving some cycles
		 */
		VRING_FLUSH(&desc[idx], sizeof(desc[idx]));
	}

	return idx;
}

/*
 *
 * vq_ring_free_chain
 *
 */
static void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	/* CACHE: desc is never written by remote, no need to invalidate */
	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0) {
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	}

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
		 "failed to free entire desc chain, remaining");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 *
	 * CACHE: desc.next is never read by remote, no need to flush it.
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}

/*
 *
 * vq_ring_init
 *
 */
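/*
 * Lays the vring structures out over ring_mem. Only the driver side links
 * the descriptors into a free chain via desc[].next; the device side
 * leaves the descriptor table untouched.
 */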
static void vq_ring_init(struct virtqueue *vq, void *ring_mem, int alignment)
{
	struct vring *vr;
	int size;

	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, alignment);

	if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
		int i;

		for (i = 0; i < size - 1; i++)
			vr->desc[i].next = i + 1;
		vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
	}
}

/*
 *
 * vq_ring_update_avail
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 *
	 * CACHE: avail is never written by remote, so it is safe to not invalidate here
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	/* We still need to flush the ring */
	VRING_FLUSH(&vq->vq_ring.avail->ring[avail_idx],
		    sizeof(vq->vq_ring.avail->ring[avail_idx]));

	atomic_thread_fence(memory_order_seq_cst);

	vq->vq_ring.avail->idx++;

	/* And the index */
	VRING_FLUSH(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

/*
 *
 * vq_ring_enable_interrupt
 *
 */
static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_dev->features & VIRTIO_RING_F_EVENT_IDX) {
		if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
			vring_used_event(&vq->vq_ring) =
			    vq->vq_used_cons_idx + ndesc;
			VRING_FLUSH(&vring_used_event(&vq->vq_ring),
				    sizeof(vring_used_event(&vq->vq_ring)));
		}
		if (VIRTIO_ROLE_IS_DEVICE(vq->vq_dev)) {
			vring_avail_event(&vq->vq_ring) =
			    vq->vq_available_idx + ndesc;
			VRING_FLUSH(&vring_avail_event(&vq->vq_ring),
				    sizeof(vring_avail_event(&vq->vq_ring)));
		}
	} else {
		if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
			vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
			VRING_FLUSH(&vq->vq_ring.avail->flags,
				    sizeof(vq->vq_ring.avail->flags));
		}
		if (VIRTIO_ROLE_IS_DEVICE(vq->vq_dev)) {
			vq->vq_ring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
			VRING_FLUSH(&vq->vq_ring.used->flags,
				    sizeof(vq->vq_ring.used->flags));
		}
	}

	atomic_thread_fence(memory_order_seq_cst);

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
		if (virtqueue_nused(vq) > ndesc) {
			return 1;
		}
	}
	if (VIRTIO_ROLE_IS_DEVICE(vq->vq_dev)) {
		if (virtqueue_navail(vq) > ndesc) {
			return 1;
		}
	}

	return 0;
}

/*
 *
 * virtqueue_notification
 *
 */
void virtqueue_notification(struct virtqueue *vq)
{
	atomic_thread_fence(memory_order_seq_cst);
	if (vq->callback)
		vq->callback(vq);
}

/*
 *
 * vq_ring_must_notify
 *
 */
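/*
 * With VIRTIO_RING_F_EVENT_IDX the peer publishes the index it wants to
 * be woken at, and vring_need_event() checks whether the indices queued
 * since the last kick crossed it. For example, with prev_idx = 10,
 * new_idx = 13 and event_idx = 11, the event index lies in (10, 13], so
 * a notification is required. Without that feature only the peer's
 * NO_NOTIFY/NO_INTERRUPT flag is checked.
 */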
static int vq_ring_must_notify(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_dev->features & VIRTIO_RING_F_EVENT_IDX) {
		if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
			/* CACHE: no need to invalidate avail */
			new_idx = vq->vq_ring.avail->idx;
			prev_idx = new_idx - vq->vq_queued_cnt;
			VRING_INVALIDATE(&vring_avail_event(&vq->vq_ring),
					 sizeof(vring_avail_event(&vq->vq_ring)));
			event_idx = vring_avail_event(&vq->vq_ring);
			return vring_need_event(event_idx, new_idx,
						prev_idx) != 0;
		}
		if (VIRTIO_ROLE_IS_DEVICE(vq->vq_dev)) {
			/* CACHE: no need to invalidate used */
			new_idx = vq->vq_ring.used->idx;
			prev_idx = new_idx - vq->vq_queued_cnt;
			VRING_INVALIDATE(&vring_used_event(&vq->vq_ring),
					 sizeof(vring_used_event(&vq->vq_ring)));
			event_idx = vring_used_event(&vq->vq_ring);
			return vring_need_event(event_idx, new_idx,
						prev_idx) != 0;
		}
	} else {
		if (VIRTIO_ROLE_IS_DRIVER(vq->vq_dev)) {
			VRING_INVALIDATE(&vq->vq_ring.used->flags,
					 sizeof(vq->vq_ring.used->flags));
			return (vq->vq_ring.used->flags &
				VRING_USED_F_NO_NOTIFY) == 0;
		}
		if (VIRTIO_ROLE_IS_DEVICE(vq->vq_dev)) {
			VRING_INVALIDATE(&vq->vq_ring.avail->flags,
					 sizeof(vq->vq_ring.avail->flags));
			return (vq->vq_ring.avail->flags &
				VRING_AVAIL_F_NO_INTERRUPT) == 0;
		}
	}

	return 0;
}

/*
 *
 * vq_ring_notify
 *
 */
static void vq_ring_notify(struct virtqueue *vq)
{
	if (vq->notify)
		vq->notify(vq);
}

/*
 *
 * virtqueue_nused
 *
 */
static int virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	/* Used is written by remote */
	VRING_INVALIDATE(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));
	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return nused;
}

/*
 *
 * virtqueue_navail
 *
 */
static int virtqueue_navail(struct virtqueue *vq)
{
	uint16_t avail_idx, navail;

	/* Avail is written by driver */
	VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));

	avail_idx = vq->vq_ring.avail->idx;

	navail = (uint16_t)(avail_idx - vq->vq_available_idx);
	VQASSERT(vq, navail <= vq->vq_nentries, "avail more than available");

	return navail;
}