1 #ifndef VIRTQUEUE_H_
2 #define VIRTQUEUE_H_
3
4 /*-
5 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
6 * All rights reserved.
7 *
8 * SPDX-License-Identifier: BSD-2-Clause
9 *
10 * $FreeBSD$
11 */
12
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
15
16 #if defined __cplusplus
17 extern "C" {
18 #endif
19
20 #include <openamp/virtio_ring.h>
21 #include <metal/alloc.h>
22 #include <metal/io.h>
23 #include <metal/cache.h>
24
25 /* Error Codes */
26 #define VQ_ERROR_BASE -3000
27 #define ERROR_VRING_FULL (VQ_ERROR_BASE - 1)
28 #define ERROR_INVLD_DESC_IDX (VQ_ERROR_BASE - 2)
29 #define ERROR_EMPTY_RING (VQ_ERROR_BASE - 3)
30 #define ERROR_NO_MEM (VQ_ERROR_BASE - 4)
31 #define ERROR_VRING_MAX_DESC (VQ_ERROR_BASE - 5)
32 #define ERROR_VRING_ALIGN (VQ_ERROR_BASE - 6)
33 #define ERROR_VRING_NO_BUFF (VQ_ERROR_BASE - 7)
34 #define ERROR_VQUEUE_INVLD_PARAM (VQ_ERROR_BASE - 8)
35
36 #define VQUEUE_SUCCESS 0
37
38 /* The maximum virtqueue size is 2^15. Use that value as the end of
39 * descriptor chain terminator since it will never be a valid index
40 * in the descriptor table. This is used to verify we are correctly
41 * handling vq_free_cnt.
42 */
43 #define VQ_RING_DESC_CHAIN_END 32768
44
45 /* Support for indirect buffer descriptors. */
46 #define VIRTIO_RING_F_INDIRECT_DESC (1 << 28)
47
48 /* Support to suppress interrupt until specific index is reached. */
49 #define VIRTIO_RING_F_EVENT_IDX (1 << 29)
50
51 /* cache invalidation helpers */
52 #define CACHE_FLUSH(x, s) metal_cache_flush(x, s)
53 #define CACHE_INVALIDATE(x, s) metal_cache_invalidate(x, s)
54
55 #ifdef VIRTIO_CACHED_VRINGS
56 #warning "VIRTIO_CACHED_VRINGS is deprecated, please use VIRTIO_USE_DCACHE"
57 #endif
58 #if defined(VIRTIO_CACHED_VRINGS) || defined(VIRTIO_USE_DCACHE)
59 #define VRING_FLUSH(x, s) CACHE_FLUSH(x, s)
60 #define VRING_INVALIDATE(x, s) CACHE_INVALIDATE(x, s)
61 #else
62 #define VRING_FLUSH(x, s) do { } while (0)
63 #define VRING_INVALIDATE(x, s) do { } while (0)
64 #endif /* VIRTIO_CACHED_VRINGS || VIRTIO_USE_DCACHE */
65
/**
 * @brief Buffer descriptor.
 *
 * Describes one buffer of a scatter-gather list handed to
 * virtqueue_add_buffer().
 */
struct virtqueue_buf {
	/** Address of the buffer. */
	void *buf;

	/** Size of the buffer in bytes. */
	int len;
};
74
/** @brief Vring descriptor extra information for buffer list management. */
struct vq_desc_extra {
	/**
	 * Opaque cookie supplied by the caller of virtqueue_add_buffer();
	 * returned to the caller by virtqueue_get_buffer().
	 */
	void *cookie;

	/** Number of chained descriptors for this entry. */
	uint16_t ndescs;
};
83
84 /** @brief Local virtio queue to manage a virtio ring for sending or receiving. */
85 struct virtqueue {
86 /** Associated virtio device. */
87 struct virtio_device *vq_dev;
88
89 /** Name of the virtio queue. */
90 const char *vq_name;
91
92 /** Index of the virtio queue. */
93 uint16_t vq_queue_index;
94
95 /** Max number of buffers in the virtio queue. */
96 uint16_t vq_nentries;
97
98 /** Function to invoke, when message is available on the virtio queue. */
99 void (*callback)(struct virtqueue *vq);
100
101 /** Private data associated to the virtio queue. */
102 void *priv;
103
104 /** Function to invoke, to inform the other side about an update in the virtio queue. */
105 void (*notify)(struct virtqueue *vq);
106
107 /** Associated virtio ring. */
108 struct vring vq_ring;
109
110 /** Number of free descriptor in the virtio ring. */
111 uint16_t vq_free_cnt;
112
113 /** Number of queued buffer in the virtio ring. */
114 uint16_t vq_queued_cnt;
115
116 /**
117 * Metal I/O region of the buffers.
118 * This structure is used for conversion between virtual and physical addresses.
119 */
120 struct metal_io_region *shm_io;
121
122 /**
123 * Head of the free chain in the descriptor table. If there are no free descriptors,
124 * this will be set to VQ_RING_DESC_CHAIN_END.
125 */
126 uint16_t vq_desc_head_idx;
127
128 /** Last consumed descriptor in the used table, trails vq_ring.used->idx. */
129 uint16_t vq_used_cons_idx;
130
131 /** Last consumed descriptor in the available table, used by the consumer side. */
132 uint16_t vq_available_idx;
133
134 #ifdef VQUEUE_DEBUG
135 /** Debug counter for virtqueue reentrance check. */
136 bool vq_inuse;
137 #endif
138
139 /**
140 * Used by the host side during callback. Cookie holds the address of buffer received from
141 * other side. Other fields in this structure are not used currently.
142 */
143 struct vq_desc_extra vq_descx[0];
144 };
145
/**
 * @brief Virtio ring specific information.
 *
 * Describes the memory used to back a vring; passed to virtqueue_create().
 */
struct vring_alloc_info {
	/** Vring address (start of the descriptor table). */
	void *vaddr;

	/** Vring alignment in bytes. */
	uint32_t align;

	/** Number of descriptors in the vring. */
	uint16_t num_descs;

	/** Padding, keeps the structure size a multiple of 4 bytes. */
	uint16_t pad;
};
160
161 typedef void (*vq_callback)(struct virtqueue *);
162 typedef void (*vq_notify)(struct virtqueue *);
163
#ifdef VQUEUE_DEBUG
#include <metal/log.h>
#include <metal/assert.h>

/*
 * Log an emergency message and assert when _exp is false.
 * NOTE: _exp is evaluated twice (once for the check, once inside
 * metal_assert), so it must be free of side effects.
 */
#define VQASSERT(_vq, _exp, _msg) \
	do { \
		if (!(_exp)) { \
			metal_log(METAL_LOG_EMERGENCY, \
				  "%s: %s - "_msg, __func__, (_vq)->vq_name); \
			metal_assert(_exp); \
		} \
	} while (0)

/* Assert that _idx is a valid index into the descriptor table. */
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
	VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, "invalid ring index")

/* Assert that the free-descriptor chain of a full ring is terminated. */
#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
	VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
		 VQ_RING_DESC_CHAIN_END, \
		 "full ring terminated incorrectly: invalid head")

/*
 * Record status_err in status_var when condition holds, but only if no
 * earlier error was recorded (status_var still 0) — first error wins.
 */
#define VQ_PARAM_CHK(condition, status_var, status_err) \
	do { \
		if (((status_var) == 0) && (condition)) { \
			status_var = status_err; \
		} \
	} while (0)

/* Mark the virtqueue busy; assert if it is already in use (reentrance). */
#define VQUEUE_BUSY(vq) \
	do { \
		if (!(vq)->vq_inuse) \
			(vq)->vq_inuse = true; \
		else \
			VQASSERT(vq, !(vq)->vq_inuse,\
				 "VirtQueue already in use"); \
	} while (0)

/* Mark the virtqueue idle again. */
#define VQUEUE_IDLE(vq)	((vq)->vq_inuse = false)

#else

/* Debug disabled: all checks compile away to nothing. */
#define VQASSERT(_vq, _exp, _msg)
#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)
#define VQ_RING_ASSERT_CHAIN_TERM(_vq)
#define VQ_PARAM_CHK(condition, status_var, status_err)
#define VQUEUE_BUSY(vq)
#define VQUEUE_IDLE(vq)

#endif
213
214 /**
215 * @internal
216 *
217 * @brief Creates new VirtIO queue
218 *
219 * @param device Pointer to VirtIO device
 * @param id VirtIO queue ID, must be unique
221 * @param name Name of VirtIO queue
222 * @param ring Pointer to vring_alloc_info control block
223 * @param callback Pointer to callback function, invoked
224 * when message is available on VirtIO queue
225 * @param notify Pointer to notify function, used to notify
226 * other side that there is job available for it
227 * @param vq Created VirtIO queue.
228 *
229 * @return Function status
230 */
231 int virtqueue_create(struct virtio_device *device, unsigned short id,
232 const char *name, struct vring_alloc_info *ring,
233 void (*callback)(struct virtqueue *vq),
234 void (*notify)(struct virtqueue *vq),
235 struct virtqueue *vq);
236
/**
 * @internal
 *
 * @brief Set the shared memory I/O region of the VirtIO queue
 *
 * The region is stored in vq->shm_io and used for conversion between
 * virtual and physical buffer addresses.
 *
 * @param vq Pointer to VirtIO queue control block
 * @param io Pointer to the shared memory I/O region
 */
static inline void virtqueue_set_shmem_io(struct virtqueue *vq,
					  struct metal_io_region *io)
{
	vq->shm_io = io;
}
250
251 /**
252 * @internal
253 *
254 * @brief Enqueues new buffer in vring for consumption by other side. Readable
255 * buffers are always inserted before writable buffers
256 *
257 * @param vq Pointer to VirtIO queue control block.
258 * @param buf_list Pointer to a list of virtqueue buffers.
259 * @param readable Number of readable buffers
260 * @param writable Number of writable buffers
261 * @param cookie Pointer to hold call back data
262 *
263 * @return Function status
264 */
265 int virtqueue_add_buffer(struct virtqueue *vq, struct virtqueue_buf *buf_list,
266 int readable, int writable, void *cookie);
267
268 /**
269 * @internal
270 *
271 * @brief Returns used buffers from VirtIO queue
272 *
273 * @param vq Pointer to VirtIO queue control block
 * @param len Length of consumed buffer
275 * @param idx Index of the buffer
276 *
277 * @return Pointer to used buffer
278 */
279 void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx);
280
281 /**
282 * @internal
283 *
284 * @brief Returns buffer available for use in the VirtIO queue
285 *
286 * @param vq Pointer to VirtIO queue control block
287 * @param avail_idx Pointer to index used in vring desc table
288 * @param len Length of buffer
289 *
290 * @return Pointer to available buffer
291 */
292 void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
293 uint32_t *len);
294
295 /**
296 * @internal
297 *
298 * @brief Returns consumed buffer back to VirtIO queue
299 *
300 * @param vq Pointer to VirtIO queue control block
301 * @param head_idx Index of vring desc containing used buffer
302 * @param len Length of buffer
303 *
304 * @return Function status
305 */
306 int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
307 uint32_t len);
308
309 /**
310 * @internal
311 *
312 * @brief Disables callback generation
313 *
314 * @param vq Pointer to VirtIO queue control block
315 */
316 void virtqueue_disable_cb(struct virtqueue *vq);
317
318 /**
319 * @internal
320 *
321 * @brief Enables callback generation
322 *
323 * @param vq Pointer to VirtIO queue control block
324 *
325 * @return Function status
326 */
327 int virtqueue_enable_cb(struct virtqueue *vq);
328
329 /**
330 * @internal
331 *
332 * @brief Notifies other side that there is buffer available for it.
333 *
334 * @param vq Pointer to VirtIO queue control block
335 */
336 void virtqueue_kick(struct virtqueue *vq);
337
virtqueue_allocate(unsigned int num_desc_extra)338 static inline struct virtqueue *virtqueue_allocate(unsigned int num_desc_extra)
339 {
340 struct virtqueue *vqs;
341 uint32_t vq_size = sizeof(struct virtqueue) +
342 num_desc_extra * sizeof(struct vq_desc_extra);
343
344 vqs = (struct virtqueue *)metal_allocate_memory(vq_size);
345 if (vqs) {
346 memset(vqs, 0x00, vq_size);
347 }
348
349 return vqs;
350 }
351
352 /**
353 * @internal
354 *
355 * @brief Frees VirtIO queue resources
356 *
357 * @param vq Pointer to VirtIO queue control block
358 */
359 void virtqueue_free(struct virtqueue *vq);
360
361 /**
362 * @internal
363 *
 * @brief Dumps important virtqueue fields, used for debugging purposes
365 *
366 * @param vq Pointer to VirtIO queue control block
367 */
368 void virtqueue_dump(struct virtqueue *vq);
369
370 void virtqueue_notification(struct virtqueue *vq);
371
372 /**
373 * @internal
374 *
375 * @brief Returns vring descriptor size
376 *
377 * @param vq Pointer to VirtIO queue control block
378 *
379 * @return Descriptor length
380 */
381 uint32_t virtqueue_get_desc_size(struct virtqueue *vq);
382
383 uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx);
384 void *virtqueue_get_buffer_addr(struct virtqueue *vq, uint16_t idx);
385
386 /**
387 * @brief Test if virtqueue is empty
388 *
389 * @param vq Pointer to VirtIO queue control block
390 *
391 * @return 1 if virtqueue is empty, 0 otherwise
392 */
virtqueue_empty(struct virtqueue * vq)393 static inline int virtqueue_empty(struct virtqueue *vq)
394 {
395 return (vq->vq_nentries == vq->vq_free_cnt);
396 }
397
398 /**
399 * @brief Test if virtqueue is full
400 *
401 * @param vq Pointer to VirtIO queue control block
402 *
403 * @return 1 if virtqueue is full, 0 otherwise
404 */
virtqueue_full(struct virtqueue * vq)405 static inline int virtqueue_full(struct virtqueue *vq)
406 {
407 return (vq->vq_free_cnt == 0);
408 }
409
410 #if defined __cplusplus
411 }
412 #endif
413
414 #endif /* VIRTQUEUE_H_ */
415