/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * Copyright (c) 2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rpmsg_env.h"
#include "virtqueue.h"

/* Prototypes for internal functions. */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx);
static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len);
static uint16_t vq_ring_add_buffer(
    struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length);
static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc);
static int32_t vq_ring_must_notify_host(struct virtqueue *vq);
static void vq_ring_notify_host(struct virtqueue *vq);
static uint16_t virtqueue_nused(struct virtqueue *vq);

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
/*!
 * virtqueue_create_static - Creates a new VirtIO queue - static version
 *
 * @param id - VirtIO queue ID, must be unique
 * @param name - Name of the VirtIO queue
 * @param ring - Pointer to vring_alloc_info control block
 * @param callback_fc - Pointer to callback function, invoked
 *                      when a message is available on the VirtIO queue
 * @param notify_fc - Pointer to notify function, used to notify
 *                    the other side that there is a job available for it
 * @param v_queue - Created VirtIO queue
 * @param vq_ctxt - Statically allocated virtqueue context
 *
 * @return - Function status
 */
int32_t virtqueue_create_static(uint16_t id,
                                const char *name,
                                struct vring_alloc_info *ring,
                                void (*callback_fc)(struct virtqueue *vq),
                                void (*notify_fc)(struct virtqueue *vq),
                                struct virtqueue **v_queue,
                                struct vq_static_context *vq_ctxt)
{
    struct virtqueue *vq = VQ_NULL;
    volatile int32_t status = VQUEUE_SUCCESS;
    uint32_t vq_size = 0U;

    VQ_PARAM_CHK(vq_ctxt == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs == 0U, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1U), status, ERROR_VRING_ALIGN);

    if (status == VQUEUE_SUCCESS)
    {
        vq_size = sizeof(struct virtqueue);
        vq = &vq_ctxt->vq;

        env_memset(vq, 0x00, vq_size);

        env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
        vq->vq_queue_index = id;
        vq->vq_alignment = (int32_t)(ring->align);
        vq->vq_nentries = ring->num_descs;
        vq->callback_fc = callback_fc;
        vq->notify_fc = notify_fc;

        /* Indirect descriptors are not supported. */
        vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
        vq->vq_ring_mem = (void *)ring->phy_addr;

        vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment);

        *v_queue = vq;
    }

    return (status);
}
#else
/*!
 * virtqueue_create - Creates a new VirtIO queue
 *
 * @param id - VirtIO queue ID, must be unique
 * @param name - Name of the VirtIO queue
 * @param ring - Pointer to vring_alloc_info control block
 * @param callback_fc - Pointer to callback function, invoked
 *                      when a message is available on the VirtIO queue
 * @param notify_fc - Pointer to notify function, used to notify
 *                    the other side that there is a job available for it
 * @param v_queue - Created VirtIO queue
 *
 * @return - Function status
 */
int32_t virtqueue_create(uint16_t id,
                         const char *name,
                         struct vring_alloc_info *ring,
                         void (*callback_fc)(struct virtqueue *vq),
                         void (*notify_fc)(struct virtqueue *vq),
                         struct virtqueue **v_queue)
{
    struct virtqueue *vq = VQ_NULL;
    volatile int32_t status = VQUEUE_SUCCESS;
    uint32_t vq_size = 0U;

    VQ_PARAM_CHK(ring == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs == 0U, status, ERROR_VQUEUE_INVLD_PARAM);
    VQ_PARAM_CHK(ring->num_descs & (ring->num_descs - 1U), status, ERROR_VRING_ALIGN);

    if (status == VQUEUE_SUCCESS)
    {
        vq_size = sizeof(struct virtqueue);
        vq = (struct virtqueue *)env_allocate_memory(vq_size);

        if (vq == VQ_NULL)
        {
            return (ERROR_NO_MEM);
        }

        env_memset(vq, 0x00, vq_size);

        env_strncpy(vq->vq_name, name, VIRTQUEUE_MAX_NAME_SZ);
        vq->vq_queue_index = id;
        vq->vq_alignment = (int32_t)(ring->align);
        vq->vq_nentries = ring->num_descs;
        vq->callback_fc = callback_fc;
        vq->notify_fc = notify_fc;

        /* Indirect descriptors are not supported. */
        vq->vq_ring_size = vring_size(ring->num_descs, ring->align);
        vq->vq_ring_mem = (void *)ring->phy_addr;

        vring_init(&vq->vq_ring, vq->vq_nentries, vq->vq_ring_mem, (uint32_t)vq->vq_alignment);

        *v_queue = vq;
    }

    return (status);
}
#endif /* RL_USE_STATIC_API */

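/*
 * Usage sketch (illustrative only): bringing up a queue with the dynamic
 * API above. The ring geometry, the queue id/name, and the callback/notify
 * implementations are assumptions for the example, not values mandated by
 * this file; only the virtqueue_create() and vq_ring_init() signatures
 * come from here.
 *
 *   struct vring_alloc_info ring_info;
 *   struct virtqueue *vq = VQ_NULL;
 *
 *   ring_info.phy_addr  = shared_mem_base;  (hypothetical shared-memory vring)
 *   ring_info.align     = 0x1000U;          (power-of-two alignment)
 *   ring_info.num_descs = 16U;              (must be a power of two)
 *
 *   if (VQUEUE_SUCCESS == virtqueue_create(0U, "rx_vq", &ring_info,
 *                                          rx_callback, kick_remote, &vq))
 *   {
 *       vq_ring_init(vq);  (chain the free descriptors before first use)
 *   }
 */
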
/*!
 * virtqueue_add_buffer - Enqueues a buffer in the vring for consumption
 * by the other side
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param head_idx - Index of the buffer to be added to the avail ring
 *
 * @return - Function status
 */
int32_t virtqueue_add_buffer(struct virtqueue *vq, uint16_t head_idx)
{
    volatile int32_t status = VQUEUE_SUCCESS;

    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);

    VQUEUE_BUSY(vq, avail_write);

    if (status == VQUEUE_SUCCESS)
    {
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);

        /*
         * Update the vring_avail control block fields so that the
         * other side can get the buffer using it.
         */
        vq_ring_update_avail(vq, head_idx);
    }

    VQUEUE_IDLE(vq, avail_write);

    return (status);
}

/*!
 * virtqueue_fill_avail_buffers - Enqueues a single buffer in the vring and
 * updates the avail ring
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param buffer - Address of buffer
 * @param len - Length of buffer
 *
 * @return - Function status
 */
int32_t virtqueue_fill_avail_buffers(struct virtqueue *vq, void *buffer, uint32_t len)
{
    struct vring_desc *dp;
    uint16_t head_idx;

    volatile int32_t status = VQUEUE_SUCCESS;

    VQ_PARAM_CHK(vq == VQ_NULL, status, ERROR_VQUEUE_INVLD_PARAM);

    VQUEUE_BUSY(vq, avail_write);

    if (status == VQUEUE_SUCCESS)
    {
        head_idx = vq->vq_desc_head_idx;

        dp = &vq->vq_ring.desc[head_idx];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        dp->addr = env_map_vatopa(vq->env, buffer);
#else
        dp->addr = env_map_vatopa(buffer);
#endif
        dp->len = len;
        dp->flags = VRING_DESC_F_WRITE;

        vq->vq_desc_head_idx++;

        vq_ring_update_avail(vq, head_idx);
    }

    VQUEUE_IDLE(vq, avail_write);

    return (status);
}

/*!
 * virtqueue_get_buffer - Returns a used buffer from the VirtIO queue
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param len - Length of the consumed buffer
 * @param idx - Index into the buffer descriptor pool
 *
 * @return - Pointer to used buffer
 */
void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
{
    struct vring_used_elem *uep;
    uint16_t used_idx, desc_idx;

    if ((vq == VQ_NULL) || (vq->vq_used_cons_idx == vq->vq_ring.used->idx))
    {
        return (VQ_NULL);
    }
    VQUEUE_BUSY(vq, used_read);

    used_idx = (uint16_t)(vq->vq_used_cons_idx & ((uint16_t)(vq->vq_nentries - 1U)));
    uep = &vq->vq_ring.used->ring[used_idx];

    env_rmb();

    desc_idx = (uint16_t)uep->id;
    if (len != VQ_NULL)
    {
        *len = uep->len;
    }

    if (idx != VQ_NULL)
    {
        *idx = desc_idx;
    }

    vq->vq_used_cons_idx++;

    VQUEUE_IDLE(vq, used_read);

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    return env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[desc_idx].addr)));
#else
    return env_map_patova((uint32_t)(vq->vq_ring.desc[desc_idx].addr));
#endif
}

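/*
 * Consumer-side sketch (illustrative): draining every buffer the other side
 * has marked as used, then recycling the descriptors. The process_payload()
 * handler is an assumption for the example.
 *
 *   uint32_t len;
 *   uint16_t idx;
 *   void *buf;
 *
 *   while ((buf = virtqueue_get_buffer(vq, &len, &idx)) != VQ_NULL)
 *   {
 *       process_payload(buf, len);            (application-defined, hypothetical)
 *       (void)virtqueue_add_buffer(vq, idx);  (return descriptor to the avail ring)
 *   }
 *   virtqueue_kick(vq);  (let the other side know buffers were returned)
 */
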
/*!
 * virtqueue_get_buffer_length - Returns the size of a buffer
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param idx - Index into the buffer descriptor pool
 *
 * @return - Buffer length
 */
uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
    return vq->vq_ring.desc[idx].len;
}

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
/*!
 * virtqueue_free_static - Frees VirtIO queue resources - static version
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_free_static(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->vq_ring_mem != VQ_NULL)
        {
            vq->vq_ring_size = 0;
            vq->vq_ring_mem = VQ_NULL;
        }
    }
}
#else
/*!
 * virtqueue_free - Frees VirtIO queue resources
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_free(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->vq_ring_mem != VQ_NULL)
        {
            vq->vq_ring_size = 0;
            vq->vq_ring_mem = VQ_NULL;
        }

        env_free_memory(vq);
    }
}
#endif /* RL_USE_STATIC_API */

/*!
 * virtqueue_get_available_buffer - Returns a buffer available for use in the
 * VirtIO queue
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param avail_idx - Pointer to the index used in the vring desc table
 * @param len - Length of buffer
 *
 * @return - Pointer to available buffer
 */
void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx, uint32_t *len)
{
    uint16_t head_idx = 0;
    void *buffer;

    if (vq->vq_available_idx == vq->vq_ring.avail->idx)
    {
        return (VQ_NULL);
    }

    VQUEUE_BUSY(vq, avail_read);

    head_idx = (uint16_t)(vq->vq_available_idx++ & ((uint16_t)(vq->vq_nentries - 1U)));
    *avail_idx = vq->vq_ring.avail->ring[head_idx];

    env_rmb();
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    buffer = env_map_patova(vq->env, ((uint32_t)(vq->vq_ring.desc[*avail_idx].addr)));
#else
    buffer = env_map_patova((uint32_t)(vq->vq_ring.desc[*avail_idx].addr));
#endif
    *len = vq->vq_ring.desc[*avail_idx].len;

    VQUEUE_IDLE(vq, avail_read);

    return (buffer);
}

/*!
 * virtqueue_add_consumed_buffer - Returns a consumed buffer back to the VirtIO queue
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param head_idx - Index of the vring desc containing the used buffer
 * @param len - Length of buffer
 *
 * @return - Function status
 */
int32_t virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx, uint32_t len)
{
    /* Valid descriptor indices are 0 .. vq_nentries - 1. */
    if (head_idx >= vq->vq_nentries)
    {
        return (ERROR_VRING_NO_BUFF);
    }

    VQUEUE_BUSY(vq, used_write);
    vq_ring_update_used(vq, head_idx, len);
    VQUEUE_IDLE(vq, used_write);

    return (VQUEUE_SUCCESS);
}

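/*
 * Remote-side sketch (illustrative): taking a buffer from the avail ring,
 * consuming it, and handing it back through the used ring. The consume()
 * handler is an assumption for the example.
 *
 *   uint16_t avail_idx;
 *   uint32_t len;
 *   void *buf = virtqueue_get_available_buffer(vq, &avail_idx, &len);
 *
 *   if (buf != VQ_NULL)
 *   {
 *       consume(buf, len);  (application-defined, hypothetical)
 *       (void)virtqueue_add_consumed_buffer(vq, avail_idx, len);
 *       virtqueue_kick(vq); (notify the producer that the buffer was used)
 *   }
 */
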
/*!
 * virtqueue_fill_used_buffers - Fills the used buffer ring
 *
 * @param vq - Pointer to VirtIO queue control block
 * @param buffer - Buffer to add
 * @param len - Length of buffer
 *
 * @return - Function status
 */
int32_t virtqueue_fill_used_buffers(struct virtqueue *vq, void *buffer, uint32_t len)
{
    uint16_t head_idx;
    uint16_t idx;

    VQUEUE_BUSY(vq, used_write);

    head_idx = vq->vq_desc_head_idx;
    VQ_RING_ASSERT_VALID_IDX(vq, head_idx);

    /* Enqueue buffer onto the ring. */
    idx = vq_ring_add_buffer(vq, vq->vq_ring.desc, head_idx, buffer, len);

    vq->vq_desc_head_idx = idx;

    vq_ring_update_used(vq, head_idx, len);

    VQUEUE_IDLE(vq, used_write);

    return (VQUEUE_SUCCESS);
}

/*!
 * virtqueue_enable_cb - Enables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Function status
 */
int32_t virtqueue_enable_cb(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    return (vq_ring_enable_interrupt(vq, 0));
}
/* coco end */
/*!
 * virtqueue_disable_cb - Disables callback generation
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 */
void virtqueue_disable_cb(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq, avail_write);

    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL)
    {
        /* coco begin validated: This part does not need to be tested because VIRTQUEUE_FLAG_EVENT_IDX is not being
         * utilized in rpmsg_lite implementation */
        vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1U;
    }
    /* coco end */
    else
    {
        vq->vq_ring.avail->flags |= (uint16_t)VRING_AVAIL_F_NO_INTERRUPT;
    }

    VQUEUE_IDLE(vq, avail_write);
}

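/*
 * Polling-mode sketch (illustrative): suppressing used-buffer callbacks while
 * a batch is drained, then re-arming them. Per vq_ring_enable_interrupt()
 * below, virtqueue_enable_cb() returns 1 when entries arrived while callbacks
 * were disabled. The drain_queue() helper is an assumption for the example.
 *
 *   virtqueue_disable_cb(vq);
 *   drain_queue(vq);  (application-defined, hypothetical)
 *   if (virtqueue_enable_cb(vq) != 0)
 *   {
 *       drain_queue(vq);  (entries raced in; process them as well)
 *   }
 */
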
/*!
 * virtqueue_kick - Notifies the other side that there is a buffer available for it
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_kick(struct virtqueue *vq)
{
    VQUEUE_BUSY(vq, avail_write);

    /* Ensure updated avail->idx is visible to host. */
    env_mb();

    if (0 != vq_ring_must_notify_host(vq))
    {
        vq_ring_notify_host(vq);
    }
    vq->vq_queued_cnt = 0;

    VQUEUE_IDLE(vq, avail_write);
}

/*!
 * virtqueue_dump - Dumps important virtqueue fields, used for debugging purposes
 *
 * @param vq - Pointer to VirtIO queue control block
 */
void virtqueue_dump(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    if (vq == VQ_NULL)
    {
        return;
    }

    env_print(
        "VQ: %s - size=%d; used=%d; queued=%d; "
        "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
        "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\r\n",
        vq->vq_name, vq->vq_nentries, virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
        vq->vq_ring.avail->idx, vq->vq_used_cons_idx, vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
        vq->vq_ring.used->flags);
}
/* coco end */

/*!
 * virtqueue_get_desc_size - Returns the length of the next available buffer
 *
 * @param vq - Pointer to VirtIO queue control block
 *
 * @return - Length of the buffer referenced by the next available descriptor,
 *           or 0 if no buffer is available
 */
uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    uint16_t head_idx;
    uint16_t avail_idx;
    uint32_t len;

    if (vq->vq_available_idx == vq->vq_ring.avail->idx)
    {
        return 0;
    }

    head_idx = (uint16_t)(vq->vq_available_idx & ((uint16_t)(vq->vq_nentries - 1U)));
    avail_idx = vq->vq_ring.avail->ring[head_idx];
    len = vq->vq_ring.desc[avail_idx].len;

    return (len);
}
/* coco end */

/**************************************************************************
 *                            Helper Functions                            *
 **************************************************************************/

/*!
 *
 * vq_ring_add_buffer
 *
 */
static uint16_t vq_ring_add_buffer(
    struct virtqueue *vq, struct vring_desc *desc, uint16_t head_idx, void *buffer, uint32_t length)
{
    struct vring_desc *dp;

    if (buffer == VQ_NULL)
    {
        return head_idx; /* coco validated: line never reached, vq_ring_add_buffer() is called from
                            rpmsg_lite_master_init() only and a NULL check of the buffer parameter is done
                            before it is passed */
    }

    VQASSERT(vq, head_idx != VQ_RING_DESC_CHAIN_END, "premature end of free desc chain");

    dp = &desc[head_idx];
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    dp->addr = env_map_vatopa(vq->env, buffer);
#else
    dp->addr = env_map_vatopa(buffer);
#endif
    dp->len = length;
    dp->flags = VRING_DESC_F_WRITE;

    return (head_idx + 1U);
}

/*!
 *
 * vq_ring_init
 *
 */
void vq_ring_init(struct virtqueue *vq)
{
    struct vring *vr;
    uint32_t i, size;

    size = (uint32_t)(vq->vq_nentries);
    vr = &vq->vq_ring;

    for (i = 0U; i < size - 1U; i++)
    {
        vr->desc[i].next = (uint16_t)(i + 1U);
    }
    vr->desc[i].next = (uint16_t)VQ_RING_DESC_CHAIN_END;
}

/*!
 *
 * vq_ring_update_avail
 *
 */
static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
    uint16_t avail_idx;

    /*
     * Place the head of the descriptor chain into the next slot and make
     * it usable to the host. The chain is made available now rather than
     * deferring to virtqueue_notify() in the hopes that if the host is
     * currently running on another CPU, we can keep it processing the new
     * descriptor.
     */
    avail_idx = (uint16_t)(vq->vq_ring.avail->idx & ((uint16_t)(vq->vq_nentries - 1U)));
    vq->vq_ring.avail->ring[avail_idx] = desc_idx;

    env_wmb();

    vq->vq_ring.avail->idx++;

    /* Keep pending count until virtqueue_notify(). */
    vq->vq_queued_cnt++;
}

/*!
 *
 * vq_ring_update_used
 *
 */
static void vq_ring_update_used(struct virtqueue *vq, uint16_t head_idx, uint32_t len)
{
    uint16_t used_idx;
    struct vring_used_elem *used_desc = VQ_NULL;

    /*
     * Place the used element into the next slot of the used ring and make
     * it visible to the other side. The write barrier below ensures the
     * element is filled in before the used index is advanced.
     */
    used_idx = vq->vq_ring.used->idx & (vq->vq_nentries - 1U);
    used_desc = &(vq->vq_ring.used->ring[used_idx]);
    used_desc->id = head_idx;
    used_desc->len = len;

    env_wmb();

    vq->vq_ring.used->idx++;
}

/*!
 *
 * vq_ring_enable_interrupt
 *
 */
static int32_t vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    /*
     * Enable interrupts, making sure we get the latest index of
     * what's already been consumed.
     */
    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL)
    {
        vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
    }
    else
    {
        vq->vq_ring.avail->flags &= ~(uint16_t)VRING_AVAIL_F_NO_INTERRUPT;
    }

    env_mb();

    /*
     * Enough items may have already been consumed to meet our threshold
     * since we last checked. Let our caller know so it processes the new
     * entries.
     */
    if (virtqueue_nused(vq) > ndesc)
    {
        return (1);
    }

    return (0);
}
/* coco end */

/*!
 *
 * virtqueue_notification
 *
 */
void virtqueue_notification(struct virtqueue *vq)
{
    if (vq != VQ_NULL)
    {
        if (vq->callback_fc != VQ_NULL)
        {
            vq->callback_fc(vq);
        }
    }
}

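/*
 * Interrupt wiring sketch (illustrative): a platform ISR typically maps the
 * incoming vector to its queue and forwards it here, so the callback_fc
 * registered at creation time runs. The lookup helper is hypothetical.
 *
 *   void platform_vq_isr(uint32_t vector)
 *   {
 *       struct virtqueue *vq = lookup_vq_for_vector(vector);  (hypothetical)
 *       virtqueue_notification(vq);  (NULL-safe; dispatches vq->callback_fc)
 *   }
 */
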
/*!
 *
 * vq_ring_must_notify_host
 *
 */
static int32_t vq_ring_must_notify_host(struct virtqueue *vq)
{
    uint16_t new_idx, prev_idx;
    uint16_t event_idx;

    if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) != 0UL)
    {
        /* coco begin validated: This part does not need to be tested because VIRTQUEUE_FLAG_EVENT_IDX is not being
         * utilized in rpmsg_lite implementation */
        new_idx = vq->vq_ring.avail->idx;
        prev_idx = new_idx - vq->vq_queued_cnt;
        event_idx = (uint16_t)vring_avail_event(&vq->vq_ring);

        return ((vring_need_event(event_idx, new_idx, prev_idx) != 0) ? 1 : 0);
    }
    /* coco end */

    return (((vq->vq_ring.used->flags & ((uint16_t)VRING_USED_F_NO_NOTIFY)) == 0U) ? 1 : 0);
}

/*!
 *
 * vq_ring_notify_host
 *
 */
static void vq_ring_notify_host(struct virtqueue *vq)
{
    if (vq->notify_fc != VQ_NULL)
    {
        vq->notify_fc(vq);
    }
}

/*!
 *
 * virtqueue_nused
 *
 */
static uint16_t virtqueue_nused(struct virtqueue *vq)
{
    /* coco begin validated: This virtqueue function does not need to be tested because it is not used in rpmsg_lite
     * implementation */
    uint16_t used_idx, nused;

    used_idx = vq->vq_ring.used->idx;

    nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
    VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

    return (nused);
}
/* coco end */
