/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
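
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver queuing one device-readable request header and one
 * device-writable status byte in a single call.  The "req" structure and
 * the "vq" pointer are hypothetical placeholders.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;		(readable by the device: out)
 *	sgs[1] = &status;	(writable by the device: in)
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */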

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
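
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): draining completions from a virtqueue callback.  The
 * "complete_request" helper is a hypothetical placeholder for the
 * driver-specific completion work.
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			complete_request(token, len);
 *	}
 */
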
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}

}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
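
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the usual race-free service loop built from disable_cb and
 * enable_cb.  If virtqueue_enable_cb() returns false, new used buffers
 * arrived while callbacks were off, so go around again.  "handle_buf" is
 * a hypothetical helper.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			handle_buf(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */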

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
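
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): reclaiming unconsumed buffers during device teardown, once the
 * queue has been stopped.  "free_request" is a hypothetical helper.
 *
 *	void *token;
 *
 *	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_request(token);
 */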

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
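
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): how a transport might allocate a ring with this helper.  The
 * "my_notify" callback and the queue size of 256 are hypothetical; real
 * transports (e.g. virtio_mmio, virtio_pci) read the maximum size from
 * the device before calling this.
 *
 *	vq = vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, ctx, my_notify, callback,
 *				    name);
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 */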

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{

	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");