Lines matching refs:vq — a cross-reference listing over the vhost core (drivers/vhost/vhost.c). Each entry gives the source line number, the matching line, and the enclosing function; the "argument"/"local" tags record how vq is bound at that site.
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) argument
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) argument
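These two macros encode the event-index layout from the virtio split-ring spec: with VIRTIO_RING_F_EVENT_IDX negotiated, the guest-written used_event occupies the slot one past the end of the avail ring, and the host-written avail_event the slot one past the used ring, which is why each macro indexes ring[vq->num] of the other side's ring. A minimal userspace sketch of the same offset arithmetic (struct layouts trimmed from include/uapi/linux/virtio_ring.h; num = 256 is just an example):

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed split-ring layouts, after include/uapi/linux/virtio_ring.h. */
    struct vring_avail { uint16_t flags; uint16_t idx; uint16_t ring[]; };
    struct vring_used_elem { uint32_t id; uint32_t len; };
    struct vring_used { uint16_t flags; uint16_t idx; struct vring_used_elem ring[]; };

    int main(void)
    {
        unsigned num = 256;  /* queue size */

        /* used_event: the __virtio16 just past avail->ring[num - 1] */
        size_t used_event = sizeof(struct vring_avail) + num * sizeof(uint16_t);
        /* avail_event: the __virtio16 just past used->ring[num - 1] */
        size_t avail_event = sizeof(struct vring_used) + num * sizeof(struct vring_used_elem);

        printf("used_event at avail+%zu, avail_event at used+%zu\n",
               used_event, avail_event);  /* 516 and 2052 for num = 256 */
        return 0;
    }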
57 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
59 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
62 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_big() argument
64 vq->user_be = true; in vhost_enable_cross_endian_big()
67 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_little() argument
69 vq->user_be = false; in vhost_enable_cross_endian_little()
72 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
76 if (vq->private_data) in vhost_set_vring_endian()
87 vhost_enable_cross_endian_big(vq); in vhost_set_vring_endian()
89 vhost_enable_cross_endian_little(vq); in vhost_set_vring_endian()
94 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
99 .num = vq->user_be in vhost_get_vring_endian()
108 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
115 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; in vhost_init_is_le()
118 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
122 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
127 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
133 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
135 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) in vhost_init_is_le()
140 static void vhost_reset_is_le(struct vhost_virtqueue *vq) in vhost_reset_is_le() argument
142 vhost_init_is_le(vq); in vhost_reset_is_le()
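The cross-endian block above (built with CONFIG_VHOST_CROSS_ENDIAN_LEGACY) boils vhost_init_is_le() down to one predicate: a VIRTIO_F_VERSION_1 ring is always little-endian, while a legacy ring follows user_be, whose default is the host's native byte order; the second vhost_init_is_le(), compiled without the option, falls back to virtio_legacy_is_little_endian() instead. A hedged truth-table restatement (the helper name is illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Restates vhost_init_is_le() under CONFIG_VHOST_CROSS_ENDIAN_LEGACY:
     * is_le = VERSION_1 || !user_be. */
    static bool vq_is_le(bool has_version_1, bool user_be)
    {
        return has_version_1 || !user_be;
    }

    int main(void)
    {
        for (int v1 = 0; v1 <= 1; v1++)
            for (int be = 0; be <= 1; be++)
                printf("VERSION_1=%d user_be=%d -> is_le=%d\n",
                       v1, be, vq_is_le(v1, be));
        return 0;
    }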
284 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq) in __vhost_vq_meta_reset() argument
289 vq->meta_iotlb[j] = NULL; in __vhost_vq_meta_reset()
301 struct vhost_virtqueue *vq) in vhost_vq_reset() argument
303 vq->num = 1; in vhost_vq_reset()
304 vq->desc = NULL; in vhost_vq_reset()
305 vq->avail = NULL; in vhost_vq_reset()
306 vq->used = NULL; in vhost_vq_reset()
307 vq->last_avail_idx = 0; in vhost_vq_reset()
308 vq->avail_idx = 0; in vhost_vq_reset()
309 vq->last_used_idx = 0; in vhost_vq_reset()
310 vq->signalled_used = 0; in vhost_vq_reset()
311 vq->signalled_used_valid = false; in vhost_vq_reset()
312 vq->used_flags = 0; in vhost_vq_reset()
313 vq->log_used = false; in vhost_vq_reset()
314 vq->log_addr = -1ull; in vhost_vq_reset()
315 vq->private_data = NULL; in vhost_vq_reset()
316 vq->acked_features = 0; in vhost_vq_reset()
317 vq->acked_backend_features = 0; in vhost_vq_reset()
318 vq->log_base = NULL; in vhost_vq_reset()
319 vq->error_ctx = NULL; in vhost_vq_reset()
320 vq->kick = NULL; in vhost_vq_reset()
321 vq->call_ctx = NULL; in vhost_vq_reset()
322 vq->log_ctx = NULL; in vhost_vq_reset()
323 vhost_reset_is_le(vq); in vhost_vq_reset()
324 vhost_disable_cross_endian(vq); in vhost_vq_reset()
325 vq->busyloop_timeout = 0; in vhost_vq_reset()
326 vq->umem = NULL; in vhost_vq_reset()
327 vq->iotlb = NULL; in vhost_vq_reset()
328 __vhost_vq_meta_reset(vq); in vhost_vq_reset()
370 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) in vhost_vq_free_iovecs() argument
372 kfree(vq->indirect); in vhost_vq_free_iovecs()
373 vq->indirect = NULL; in vhost_vq_free_iovecs()
374 kfree(vq->log); in vhost_vq_free_iovecs()
375 vq->log = NULL; in vhost_vq_free_iovecs()
376 kfree(vq->heads); in vhost_vq_free_iovecs()
377 vq->heads = NULL; in vhost_vq_free_iovecs()
383 struct vhost_virtqueue *vq; in vhost_dev_alloc_iovecs() local
387 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
388 vq->indirect = kmalloc_array(UIO_MAXIOV, in vhost_dev_alloc_iovecs()
389 sizeof(*vq->indirect), in vhost_dev_alloc_iovecs()
391 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), in vhost_dev_alloc_iovecs()
393 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), in vhost_dev_alloc_iovecs()
395 if (!vq->indirect || !vq->log || !vq->heads) in vhost_dev_alloc_iovecs()
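vhost_dev_alloc_iovecs() allocates three scratch arrays per queue and, on any failure, unwinds through vhost_vq_free_iovecs(); because that helper both kfree()s and NULLs each pointer, freeing a partially allocated queue is safe. A sketch of the same allocate-all-or-unwind pattern in userspace C (names and sizes are placeholders, not the kernel's UIO_MAXIOV/iov_limit values):

    #include <stdlib.h>

    struct iovecs { void *indirect, *log, *heads; };

    static void free_iovecs(struct iovecs *v)
    {
        free(v->indirect); v->indirect = NULL;
        free(v->log);      v->log = NULL;
        free(v->heads);    v->heads = NULL;
    }

    static int alloc_iovecs(struct iovecs *v, size_t limit)
    {
        v->indirect = calloc(limit, 16);
        v->log      = calloc(limit, 16);
        v->heads    = calloc(limit, 8);
        if (!v->indirect || !v->log || !v->heads) {
            free_iovecs(v);  /* partial allocations unwind safely */
            return -1;
        }
        return 0;
    }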
414 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, in vhost_exceeds_weight() argument
417 struct vhost_dev *dev = vq->dev; in vhost_exceeds_weight()
421 vhost_poll_queue(&vq->poll); in vhost_exceeds_weight()
429 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq, in vhost_get_avail_size() argument
433 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vhost_get_avail_size()
435 return sizeof(*vq->avail) + in vhost_get_avail_size()
436 sizeof(*vq->avail->ring) * num + event; in vhost_get_avail_size()
439 static size_t vhost_get_used_size(struct vhost_virtqueue *vq, in vhost_get_used_size() argument
443 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vhost_get_used_size()
445 return sizeof(*vq->used) + in vhost_get_used_size()
446 sizeof(*vq->used->ring) * num + event; in vhost_get_used_size()
449 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, in vhost_get_desc_size() argument
452 return sizeof(*vq->desc) * num; in vhost_get_desc_size()
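Taken together, the three size helpers give the split-ring footprint: a fixed header plus num entries, with two extra tail bytes on avail and used when VIRTIO_RING_F_EVENT_IDX is negotiated (the used_event/avail_event slots defined by the two macros at the top of this listing). For num = 256 with event idx: desc = 256 * 16 = 4096, avail = 4 + 256 * 2 + 2 = 518, used = 4 + 256 * 8 + 2 = 2054 bytes. A runnable check:

    #include <stdint.h>
    #include <stdio.h>

    struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };

    int main(void)
    {
        unsigned num = 256;
        size_t event = 2;  /* VIRTIO_RING_F_EVENT_IDX negotiated */

        printf("desc=%zu avail=%zu used=%zu\n",
               sizeof(struct vring_desc) * num,     /* 4096 */
               4 + num * sizeof(uint16_t) + event,  /* 518  */
               4 + num * 8 + event);                /* 2054 */
        return 0;
    }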
459 struct vhost_virtqueue *vq; in vhost_dev_init() local
481 vq = dev->vqs[i]; in vhost_dev_init()
482 vq->log = NULL; in vhost_dev_init()
483 vq->indirect = NULL; in vhost_dev_init()
484 vq->heads = NULL; in vhost_dev_init()
485 vq->dev = dev; in vhost_dev_init()
486 mutex_init(&vq->mutex); in vhost_dev_init()
487 vhost_vq_reset(dev, vq); in vhost_dev_init()
488 if (vq->handle_kick) in vhost_dev_init()
489 vhost_poll_init(&vq->poll, vq->handle_kick, in vhost_dev_init()
738 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, in vhost_vq_meta_fetch() argument
742 const struct vhost_umem_node *node = vq->meta_iotlb[type]; in vhost_vq_meta_fetch()
776 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
779 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, in vhost_copy_to_user() argument
784 if (!vq->iotlb) in vhost_copy_to_user()
793 void __user *uaddr = vhost_vq_meta_fetch(vq, in vhost_copy_to_user()
800 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, in vhost_copy_to_user()
801 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_to_user()
805 iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size); in vhost_copy_to_user()
814 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, in vhost_copy_from_user() argument
819 if (!vq->iotlb) in vhost_copy_from_user()
827 void __user *uaddr = vhost_vq_meta_fetch(vq, in vhost_copy_from_user()
835 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, in vhost_copy_from_user()
836 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_from_user()
839 vq_err(vq, "IOTLB translation failure: uaddr " in vhost_copy_from_user()
844 iov_iter_init(&f, READ, vq->iotlb_iov, ret, size); in vhost_copy_from_user()
854 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq, in __vhost_get_user_slow() argument
860 ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, in __vhost_get_user_slow()
861 ARRAY_SIZE(vq->iotlb_iov), in __vhost_get_user_slow()
864 vq_err(vq, "IOTLB translation failure: uaddr " in __vhost_get_user_slow()
870 if (ret != 1 || vq->iotlb_iov[0].iov_len != size) { in __vhost_get_user_slow()
871 vq_err(vq, "Non atomic userspace memory access: uaddr " in __vhost_get_user_slow()
877 return vq->iotlb_iov[0].iov_base; in __vhost_get_user_slow()
885 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, in __vhost_get_user() argument
889 void __user *uaddr = vhost_vq_meta_fetch(vq, in __vhost_get_user()
894 return __vhost_get_user_slow(vq, addr, size, type); in __vhost_get_user()
897 #define vhost_put_user(vq, x, ptr) \ argument
900 if (!vq->iotlb) { \
904 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
914 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) in vhost_put_avail_event() argument
916 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), in vhost_put_avail_event()
917 vhost_avail_event(vq)); in vhost_put_avail_event()
920 static inline int vhost_put_used(struct vhost_virtqueue *vq, in vhost_put_used() argument
924 return vhost_copy_to_user(vq, vq->used->ring + idx, head, in vhost_put_used()
928 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) in vhost_put_used_flags() argument
931 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), in vhost_put_used_flags()
932 &vq->used->flags); in vhost_put_used_flags()
935 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) in vhost_put_used_idx() argument
938 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), in vhost_put_used_idx()
939 &vq->used->idx); in vhost_put_used_idx()
942 #define vhost_get_user(vq, x, ptr, type) \ argument
945 if (!vq->iotlb) { \
949 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
960 #define vhost_get_avail(vq, x, ptr) \ argument
961 vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
963 #define vhost_get_used(vq, x, ptr) \ argument
964 vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
980 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, in vhost_get_avail_idx() argument
983 return vhost_get_avail(vq, *idx, &vq->avail->idx); in vhost_get_avail_idx()
986 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, in vhost_get_avail_head() argument
989 return vhost_get_avail(vq, *head, in vhost_get_avail_head()
990 &vq->avail->ring[idx & (vq->num - 1)]); in vhost_get_avail_head()
993 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, in vhost_get_avail_flags() argument
996 return vhost_get_avail(vq, *flags, &vq->avail->flags); in vhost_get_avail_flags()
999 static inline int vhost_get_used_event(struct vhost_virtqueue *vq, in vhost_get_used_event() argument
1002 return vhost_get_avail(vq, *event, vhost_used_event(vq)); in vhost_get_used_event()
1005 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, in vhost_get_used_idx() argument
1008 return vhost_get_used(vq, *idx, &vq->used->idx); in vhost_get_used_idx()
1011 static inline int vhost_get_desc(struct vhost_virtqueue *vq, in vhost_get_desc() argument
1014 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); in vhost_get_desc()
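All of the vhost_get_*/vhost_put_* helpers above funnel through the same two-level scheme: with no IOTLB attached, the vhost_get_user/vhost_put_user macros collapse to a direct userspace access, while with an IOTLB, __vhost_get_user() first consults the per-ring meta_iotlb cache (one cached mapping per VHOST_ADDR_DESC/AVAIL/USED region) and only on a miss pays for the full translate_desc() walk. A sketch of that cached-lookup shape (types and names here are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    enum addr_type { ADDR_DESC, ADDR_AVAIL, ADDR_USED, ADDR_MAX };

    struct mapping { uint64_t start, size; void *uaddr; };

    /* Fast path: return a direct pointer if the whole access lies inside
     * the cached mapping for this ring region; NULL sends the caller to
     * the slow translation. */
    static void *meta_fetch(struct mapping *cache[ADDR_MAX],
                            uint64_t addr, uint64_t size, enum addr_type t)
    {
        struct mapping *m = cache[t];

        if (m && addr >= m->start && size <= m->start + m->size - addr)
            return (char *)m->uaddr + (addr - m->start);
        return NULL;
    }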
1070 vhost_poll_queue(&node->vq->poll); in vhost_iotlb_notify_vq()
1269 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access) in vhost_iotlb_miss() argument
1271 struct vhost_dev *dev = vq->dev; in vhost_iotlb_miss()
1274 bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2); in vhost_iotlb_miss()
1276 node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG); in vhost_iotlb_miss()
1296 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, in vq_access_ok() argument
1302 return access_ok(desc, vhost_get_desc_size(vq, num)) && in vq_access_ok()
1303 access_ok(avail, vhost_get_avail_size(vq, num)) && in vq_access_ok()
1304 access_ok(used, vhost_get_used_size(vq, num)); in vq_access_ok()
1307 static void vhost_vq_meta_update(struct vhost_virtqueue *vq, in vhost_vq_meta_update() argument
1315 vq->meta_iotlb[type] = node; in vhost_vq_meta_update()
1318 static bool iotlb_access_ok(struct vhost_virtqueue *vq, in iotlb_access_ok() argument
1322 struct vhost_umem *umem = vq->iotlb; in iotlb_access_ok()
1325 if (vhost_vq_meta_fetch(vq, addr, len, type)) in iotlb_access_ok()
1333 vhost_iotlb_miss(vq, addr, access); in iotlb_access_ok()
1345 vhost_vq_meta_update(vq, node, type); in iotlb_access_ok()
1354 int vq_meta_prefetch(struct vhost_virtqueue *vq) in vq_meta_prefetch() argument
1356 unsigned int num = vq->num; in vq_meta_prefetch()
1358 if (!vq->iotlb) in vq_meta_prefetch()
1361 return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, in vq_meta_prefetch()
1362 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && in vq_meta_prefetch()
1363 iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail, in vq_meta_prefetch()
1364 vhost_get_avail_size(vq, num), in vq_meta_prefetch()
1366 iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used, in vq_meta_prefetch()
1367 vhost_get_used_size(vq, num), VHOST_ADDR_USED); in vq_meta_prefetch()
1381 static bool vq_log_access_ok(struct vhost_virtqueue *vq, in vq_log_access_ok() argument
1384 return vq_memory_access_ok(log_base, vq->umem, in vq_log_access_ok()
1385 vhost_has_feature(vq, VHOST_F_LOG_ALL)) && in vq_log_access_ok()
1386 (!vq->log_used || log_access_ok(log_base, vq->log_addr, in vq_log_access_ok()
1387 vhost_get_used_size(vq, vq->num))); in vq_log_access_ok()
1392 bool vhost_vq_access_ok(struct vhost_virtqueue *vq) in vhost_vq_access_ok() argument
1394 if (!vq_log_access_ok(vq, vq->log_base)) in vhost_vq_access_ok()
1398 if (vq->iotlb) in vhost_vq_access_ok()
1401 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); in vhost_vq_access_ok()
1488 struct vhost_virtqueue *vq, in vhost_vring_set_num() argument
1495 if (vq->private_data) in vhost_vring_set_num()
1503 vq->num = s.num; in vhost_vring_set_num()
1509 struct vhost_virtqueue *vq, in vhost_vring_set_addr() argument
1527 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); in vhost_vring_set_addr()
1528 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); in vhost_vring_set_addr()
1537 if (vq->private_data) { in vhost_vring_set_addr()
1538 if (!vq_access_ok(vq, vq->num, in vhost_vring_set_addr()
1546 !log_access_ok(vq->log_base, a.log_guest_addr, in vhost_vring_set_addr()
1547 sizeof *vq->used + in vhost_vring_set_addr()
1548 vq->num * sizeof *vq->used->ring)) in vhost_vring_set_addr()
1552 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); in vhost_vring_set_addr()
1553 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; in vhost_vring_set_addr()
1554 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; in vhost_vring_set_addr()
1555 vq->log_addr = a.log_guest_addr; in vhost_vring_set_addr()
1556 vq->used = (void __user *)(unsigned long)a.used_user_addr; in vhost_vring_set_addr()
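vhost_vring_set_addr() is the kernel half of the VHOST_SET_VRING_ADDR ioctl: userspace hands over the three ring addresses (plus flags and a log address when VHOST_VRING_F_LOG is set) in a struct vhost_vring_addr. A minimal usage sketch, assuming an already-open vhost fd and rings mapped elsewhere (error handling elided):

    #include <linux/vhost.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    static int set_vring_addr(int vhost_fd, void *desc, void *avail, void *used)
    {
        struct vhost_vring_addr a = {
            .index           = 0,  /* queue 0 */
            .desc_user_addr  = (uint64_t)(uintptr_t)desc,
            .avail_user_addr = (uint64_t)(uintptr_t)avail,
            .used_user_addr  = (uint64_t)(uintptr_t)used,
        };

        return ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &a);
    }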
1562 struct vhost_virtqueue *vq, in vhost_vring_set_num_addr() argument
1568 mutex_lock(&vq->mutex); in vhost_vring_set_num_addr()
1572 r = vhost_vring_set_num(d, vq, argp); in vhost_vring_set_num_addr()
1575 r = vhost_vring_set_addr(d, vq, argp); in vhost_vring_set_num_addr()
1581 mutex_unlock(&vq->mutex); in vhost_vring_set_num_addr()
1591 struct vhost_virtqueue *vq; in vhost_vring_ioctl() local
1604 vq = d->vqs[idx]; in vhost_vring_ioctl()
1608 return vhost_vring_set_num_addr(d, vq, ioctl, argp); in vhost_vring_ioctl()
1611 mutex_lock(&vq->mutex); in vhost_vring_ioctl()
1617 if (vq->private_data) { in vhost_vring_ioctl()
1629 vq->last_avail_idx = s.num; in vhost_vring_ioctl()
1631 vq->avail_idx = vq->last_avail_idx; in vhost_vring_ioctl()
1635 s.num = vq->last_avail_idx; in vhost_vring_ioctl()
1649 if (eventfp != vq->kick) { in vhost_vring_ioctl()
1650 pollstop = (filep = vq->kick) != NULL; in vhost_vring_ioctl()
1651 pollstart = (vq->kick = eventfp) != NULL; in vhost_vring_ioctl()
1665 swap(ctx, vq->call_ctx); in vhost_vring_ioctl()
1677 swap(ctx, vq->error_ctx); in vhost_vring_ioctl()
1680 r = vhost_set_vring_endian(vq, argp); in vhost_vring_ioctl()
1683 r = vhost_get_vring_endian(vq, idx, argp); in vhost_vring_ioctl()
1690 vq->busyloop_timeout = s.num; in vhost_vring_ioctl()
1694 s.num = vq->busyloop_timeout; in vhost_vring_ioctl()
1702 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
1703 vhost_poll_stop(&vq->poll); in vhost_vring_ioctl()
1710 if (pollstart && vq->handle_kick) in vhost_vring_ioctl()
1711 r = vhost_poll_start(&vq->poll, vq->kick); in vhost_vring_ioctl()
1713 mutex_unlock(&vq->mutex); in vhost_vring_ioctl()
1715 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
1716 vhost_poll_flush(&vq->poll); in vhost_vring_ioctl()
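The kick and call branches in vhost_vring_ioctl() bind eventfds to the queue: VHOST_SET_VRING_KICK registers the descriptor the kernel polls for guest "buffers available" notifications (feeding vq->handle_kick via vhost_poll_start()), and VHOST_SET_VRING_CALL the descriptor it signals to raise a guest interrupt. A hedged userspace sketch:

    #include <linux/vhost.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>

    /* Create and register kick/call eventfds for queue 0. */
    static int wire_eventfds(int vhost_fd, int *kick, int *call)
    {
        struct vhost_vring_file f = { .index = 0 };

        f.fd = *kick = eventfd(0, EFD_NONBLOCK);
        if (f.fd < 0 || ioctl(vhost_fd, VHOST_SET_VRING_KICK, &f) < 0)
            return -1;

        f.fd = *call = eventfd(0, EFD_NONBLOCK);
        if (f.fd < 0 || ioctl(vhost_fd, VHOST_SET_VRING_CALL, &f) < 0)
            return -1;
        return 0;
    }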
1734 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_init_device_iotlb() local
1736 mutex_lock(&vq->mutex); in vhost_init_device_iotlb()
1737 vq->iotlb = niotlb; in vhost_init_device_iotlb()
1738 __vhost_vq_meta_reset(vq); in vhost_init_device_iotlb()
1739 mutex_unlock(&vq->mutex); in vhost_init_device_iotlb()
1781 struct vhost_virtqueue *vq; in vhost_dev_ioctl() local
1783 vq = d->vqs[i]; in vhost_dev_ioctl()
1784 mutex_lock(&vq->mutex); in vhost_dev_ioctl()
1786 if (vq->private_data && !vq_log_access_ok(vq, base)) in vhost_dev_ioctl()
1789 vq->log_base = base; in vhost_dev_ioctl()
1790 mutex_unlock(&vq->mutex); in vhost_dev_ioctl()
1870 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) in log_write_hva() argument
1872 struct vhost_umem *umem = vq->umem; in log_write_hva()
1891 r = log_write(vq->log_base, in log_write_hva()
1910 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) in log_used() argument
1915 if (!vq->iotlb) in log_used()
1916 return log_write(vq->log_base, vq->log_addr + used_offset, len); in log_used()
1918 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, in log_used()
1924 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, in log_used()
1933 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, in vhost_log_write() argument
1941 if (vq->iotlb) { in vhost_log_write()
1943 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, in vhost_log_write()
1953 r = log_write(vq->log_base, log[i].addr, l); in vhost_log_write()
1958 if (vq->log_ctx) in vhost_log_write()
1959 eventfd_signal(vq->log_ctx, 1); in vhost_log_write()
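vhost_log_write() (with the log_used()/log_write_hva() helpers above it) records dirty guest pages for live migration in a user-supplied bitmap: one bit per VHOST_PAGE_SIZE (0x1000) page, so page frame pfn lands in byte pfn/8, bit pfn%8 of the log. A small check of that addressing (a local buffer stands in for the registered log; the kernel also signals log_ctx afterwards, omitted here):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096ULL

    /* Mark every page touched by [addr, addr+len) dirty, mirroring the
     * bit math in vhost's log_write(). Sketch only. */
    static void log_mark(uint8_t *log, uint64_t addr, uint64_t len)
    {
        for (uint64_t pfn = addr / PAGE_SZ; pfn <= (addr + len - 1) / PAGE_SZ; pfn++)
            log[pfn / 8] |= 1u << (pfn % 8);
    }

    int main(void)
    {
        uint8_t log[16] = { 0 };

        log_mark(log, 0x3000, 0x2000);    /* touches pages 3 and 4 */
        printf("log[0] = %#x\n", log[0]); /* expect 0x18 */
        return 0;
    }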
1969 static int vhost_update_used_flags(struct vhost_virtqueue *vq) in vhost_update_used_flags() argument
1972 if (vhost_put_used_flags(vq)) in vhost_update_used_flags()
1974 if (unlikely(vq->log_used)) { in vhost_update_used_flags()
1978 used = &vq->used->flags; in vhost_update_used_flags()
1979 log_used(vq, (used - (void __user *)vq->used), in vhost_update_used_flags()
1980 sizeof vq->used->flags); in vhost_update_used_flags()
1981 if (vq->log_ctx) in vhost_update_used_flags()
1982 eventfd_signal(vq->log_ctx, 1); in vhost_update_used_flags()
1987 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) in vhost_update_avail_event() argument
1989 if (vhost_put_avail_event(vq)) in vhost_update_avail_event()
1991 if (unlikely(vq->log_used)) { in vhost_update_avail_event()
1996 used = vhost_avail_event(vq); in vhost_update_avail_event()
1997 log_used(vq, (used - (void __user *)vq->used), in vhost_update_avail_event()
1998 sizeof *vhost_avail_event(vq)); in vhost_update_avail_event()
1999 if (vq->log_ctx) in vhost_update_avail_event()
2000 eventfd_signal(vq->log_ctx, 1); in vhost_update_avail_event()
2005 int vhost_vq_init_access(struct vhost_virtqueue *vq) in vhost_vq_init_access() argument
2009 bool is_le = vq->is_le; in vhost_vq_init_access()
2011 if (!vq->private_data) in vhost_vq_init_access()
2014 vhost_init_is_le(vq); in vhost_vq_init_access()
2016 r = vhost_update_used_flags(vq); in vhost_vq_init_access()
2019 vq->signalled_used_valid = false; in vhost_vq_init_access()
2020 if (!vq->iotlb && in vhost_vq_init_access()
2021 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { in vhost_vq_init_access()
2025 r = vhost_get_used_idx(vq, &last_used_idx); in vhost_vq_init_access()
2027 vq_err(vq, "Can't access used idx at %p\n", in vhost_vq_init_access()
2028 &vq->used->idx); in vhost_vq_init_access()
2031 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); in vhost_vq_init_access()
2035 vq->is_le = is_le; in vhost_vq_init_access()
2040 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, in translate_desc() argument
2044 struct vhost_dev *dev = vq->dev; in translate_desc()
2082 vhost_iotlb_miss(vq, addr, access); in translate_desc()
2089 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) in next_desc() argument
2094 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) in next_desc()
2098 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); in next_desc()
2102 static int get_indirect(struct vhost_virtqueue *vq, in get_indirect() argument
2110 u32 len = vhost32_to_cpu(vq, indirect->len); in get_indirect()
2116 vq_err(vq, "Invalid length in indirect descriptor: " in get_indirect()
2123 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, in get_indirect()
2127 vq_err(vq, "Translation failure %d in indirect.\n", ret); in get_indirect()
2130 iov_iter_init(&from, READ, vq->indirect, ret, len); in get_indirect()
2140 vq_err(vq, "Indirect buffer length too big: %d\n", in get_indirect()
2148 vq_err(vq, "Loop detected: last one at %u " in get_indirect()
2154 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", in get_indirect()
2155 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2158 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { in get_indirect()
2159 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", in get_indirect()
2160 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2164 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) in get_indirect()
2169 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in get_indirect()
2170 vhost32_to_cpu(vq, desc.len), iov + iov_count, in get_indirect()
2174 vq_err(vq, "Translation failure %d indirect idx %d\n", in get_indirect()
2182 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in get_indirect()
2183 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in get_indirect()
2190 vq_err(vq, "Indirect descriptor " in get_indirect()
2196 } while ((i = next_desc(vq, &desc)) != -1); in get_indirect()
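get_indirect() consumes VIRTIO_RING_F_INDIRECT_DESC chains: the ring-level descriptor carries VRING_DESC_F_INDIRECT and points at a separate table of vring_desc entries whose total length must be a multiple of sizeof(struct vring_desc) (the "Invalid length" check), and nested indirection is rejected. The driver-side table such code parses looks roughly like this (guest-physical addresses are placeholders):

    #include <stdint.h>

    struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };

    #define VRING_DESC_F_NEXT  1
    #define VRING_DESC_F_WRITE 2

    /* One readable (out) and one writable (in) buffer in an indirect table;
     * the ring descriptor would carry VRING_DESC_F_INDIRECT and point here. */
    static void build_indirect(struct vring_desc t[2],
                               uint64_t out_gpa, uint32_t out_len,
                               uint64_t in_gpa, uint32_t in_len)
    {
        t[0] = (struct vring_desc){ .addr = out_gpa, .len = out_len,
                                    .flags = VRING_DESC_F_NEXT, .next = 1 };
        t[1] = (struct vring_desc){ .addr = in_gpa, .len = in_len,
                                    .flags = VRING_DESC_F_WRITE };
    }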
2208 int vhost_get_vq_desc(struct vhost_virtqueue *vq, in vhost_get_vq_desc() argument
2221 last_avail_idx = vq->last_avail_idx; in vhost_get_vq_desc()
2223 if (vq->avail_idx == vq->last_avail_idx) { in vhost_get_vq_desc()
2224 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) { in vhost_get_vq_desc()
2225 vq_err(vq, "Failed to access avail idx at %p\n", in vhost_get_vq_desc()
2226 &vq->avail->idx); in vhost_get_vq_desc()
2229 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_get_vq_desc()
2231 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { in vhost_get_vq_desc()
2232 vq_err(vq, "Guest moved used index from %u to %u", in vhost_get_vq_desc()
2233 last_avail_idx, vq->avail_idx); in vhost_get_vq_desc()
2240 if (vq->avail_idx == last_avail_idx) in vhost_get_vq_desc()
2241 return vq->num; in vhost_get_vq_desc()
2251 if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) { in vhost_get_vq_desc()
2252 vq_err(vq, "Failed to read head: idx %d address %p\n", in vhost_get_vq_desc()
2254 &vq->avail->ring[last_avail_idx % vq->num]); in vhost_get_vq_desc()
2258 head = vhost16_to_cpu(vq, ring_head); in vhost_get_vq_desc()
2261 if (unlikely(head >= vq->num)) { in vhost_get_vq_desc()
2262 vq_err(vq, "Guest says index %u > %u is available", in vhost_get_vq_desc()
2263 head, vq->num); in vhost_get_vq_desc()
2275 if (unlikely(i >= vq->num)) { in vhost_get_vq_desc()
2276 vq_err(vq, "Desc index is %u > %u, head = %u", in vhost_get_vq_desc()
2277 i, vq->num, head); in vhost_get_vq_desc()
2280 if (unlikely(++found > vq->num)) { in vhost_get_vq_desc()
2281 vq_err(vq, "Loop detected: last one at %u " in vhost_get_vq_desc()
2283 i, vq->num, head); in vhost_get_vq_desc()
2286 ret = vhost_get_desc(vq, &desc, i); in vhost_get_vq_desc()
2288 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", in vhost_get_vq_desc()
2289 i, vq->desc + i); in vhost_get_vq_desc()
2292 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { in vhost_get_vq_desc()
2293 ret = get_indirect(vq, iov, iov_size, in vhost_get_vq_desc()
2298 vq_err(vq, "Failure detected " in vhost_get_vq_desc()
2305 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) in vhost_get_vq_desc()
2309 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in vhost_get_vq_desc()
2310 vhost32_to_cpu(vq, desc.len), iov + iov_count, in vhost_get_vq_desc()
2314 vq_err(vq, "Translation failure %d descriptor idx %d\n", in vhost_get_vq_desc()
2323 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in vhost_get_vq_desc()
2324 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in vhost_get_vq_desc()
2331 vq_err(vq, "Descriptor has out after in: " in vhost_get_vq_desc()
2337 } while ((i = next_desc(vq, &desc)) != -1); in vhost_get_vq_desc()
2340 vq->last_avail_idx++; in vhost_get_vq_desc()
2344 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); in vhost_get_vq_desc()
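The overflow check near the top of vhost_get_vq_desc() leans on free-running u16 indices: avail_idx and last_avail_idx increment forever and wrap mod 2^16, so the pending count is the u16 difference between them, sane only while it stays <= vq->num. A two-line demonstration of why the (u16) cast matters:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t last = 65534, avail = 2;  /* wrapped past 65535 */

        /* int subtraction would give -65532; mod-2^16 gives the real count */
        printf("pending = %u\n", (uint16_t)(avail - last));  /* 4 */
        return 0;
    }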
2350 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) in vhost_discard_vq_desc() argument
2352 vq->last_avail_idx -= n; in vhost_discard_vq_desc()
2358 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) in vhost_add_used() argument
2361 cpu_to_vhost32(vq, head), in vhost_add_used()
2362 cpu_to_vhost32(vq, len) in vhost_add_used()
2365 return vhost_add_used_n(vq, &heads, 1); in vhost_add_used()
2369 static int __vhost_add_used_n(struct vhost_virtqueue *vq, in __vhost_add_used_n() argument
2377 start = vq->last_used_idx & (vq->num - 1); in __vhost_add_used_n()
2378 used = vq->used->ring + start; in __vhost_add_used_n()
2379 if (vhost_put_used(vq, heads, start, count)) { in __vhost_add_used_n()
2380 vq_err(vq, "Failed to write used"); in __vhost_add_used_n()
2383 if (unlikely(vq->log_used)) { in __vhost_add_used_n()
2387 log_used(vq, ((void __user *)used - (void __user *)vq->used), in __vhost_add_used_n()
2390 old = vq->last_used_idx; in __vhost_add_used_n()
2391 new = (vq->last_used_idx += count); in __vhost_add_used_n()
2396 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) in __vhost_add_used_n()
2397 vq->signalled_used_valid = false; in __vhost_add_used_n()
2403 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, in vhost_add_used_n() argument
2408 start = vq->last_used_idx & (vq->num - 1); in vhost_add_used_n()
2409 n = vq->num - start; in vhost_add_used_n()
2411 r = __vhost_add_used_n(vq, heads, n); in vhost_add_used_n()
2417 r = __vhost_add_used_n(vq, heads, count); in vhost_add_used_n()
2421 if (vhost_put_used_idx(vq)) { in vhost_add_used_n()
2422 vq_err(vq, "Failed to increment used idx"); in vhost_add_used_n()
2425 if (unlikely(vq->log_used)) { in vhost_add_used_n()
2429 log_used(vq, offsetof(struct vring_used, idx), in vhost_add_used_n()
2430 sizeof vq->used->idx); in vhost_add_used_n()
2431 if (vq->log_ctx) in vhost_add_used_n()
2432 eventfd_signal(vq->log_ctx, 1); in vhost_add_used_n()
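vhost_add_used_n() may cross the end of the used ring, so it writes in up to two runs: first from slot last_used_idx & (num - 1) to the top of the ring, then the remainder from slot 0, and only then publishes the new used->idx. The index math, isolated:

    #include <stdio.h>

    /* Mirror the split in vhost_add_used_n(): 'count' used elements starting
     * at last_used_idx, ring of 'num' (power-of-two) slots. */
    static void add_used_split(unsigned num, unsigned last_used_idx, unsigned count)
    {
        unsigned start = last_used_idx & (num - 1);
        unsigned first = num - start;

        if (first > count)
            first = count;
        printf("copy %u elems at slot %u, then %u at slot 0\n",
               first, start, count - first);
    }

    int main(void)
    {
        add_used_split(256, 254, 5);  /* 2 at slot 254, then 3 at slot 0 */
        return 0;
    }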
2438 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_notify() argument
2448 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && in vhost_notify()
2449 unlikely(vq->avail_idx == vq->last_avail_idx)) in vhost_notify()
2452 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_notify()
2454 if (vhost_get_avail_flags(vq, &flags)) { in vhost_notify()
2455 vq_err(vq, "Failed to get flags"); in vhost_notify()
2458 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); in vhost_notify()
2460 old = vq->signalled_used; in vhost_notify()
2461 v = vq->signalled_used_valid; in vhost_notify()
2462 new = vq->signalled_used = vq->last_used_idx; in vhost_notify()
2463 vq->signalled_used_valid = true; in vhost_notify()
2468 if (vhost_get_used_event(vq, &event)) { in vhost_notify()
2469 vq_err(vq, "Failed to get used event idx"); in vhost_notify()
2472 return vring_need_event(vhost16_to_cpu(vq, event), new, old); in vhost_notify()
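Without EVENT_IDX, vhost_notify() just honours VRING_AVAIL_F_NO_INTERRUPT; with it, the decision is vring_need_event() from include/uapi/linux/virtio_ring.h: signal only if the guest's used_event index fell inside the window of entries completed since the last signal, again in mod-2^16 arithmetic. The predicate, with two worked cases:

    #include <stdint.h>
    #include <stdio.h>

    /* Same logic as vring_need_event() in the virtio UAPI header. */
    static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
    {
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
    }

    int main(void)
    {
        /* used idx moved 8 -> 12; guest asked for a signal at 10 */
        printf("%d\n", vring_need_event(10, 12, 8));  /* 1: signal   */
        printf("%d\n", vring_need_event(20, 12, 8));  /* 0: suppress */
        return 0;
    }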
2476 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_signal() argument
2479 if (vq->call_ctx && vhost_notify(dev, vq)) in vhost_signal()
2480 eventfd_signal(vq->call_ctx, 1); in vhost_signal()
2486 struct vhost_virtqueue *vq, in vhost_add_used_and_signal() argument
2489 vhost_add_used(vq, head, len); in vhost_add_used_and_signal()
2490 vhost_signal(dev, vq); in vhost_add_used_and_signal()
2496 struct vhost_virtqueue *vq, in vhost_add_used_and_signal_n() argument
2499 vhost_add_used_n(vq, heads, count); in vhost_add_used_and_signal_n()
2500 vhost_signal(dev, vq); in vhost_add_used_and_signal_n()
2505 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_vq_avail_empty() argument
2510 if (vq->avail_idx != vq->last_avail_idx) in vhost_vq_avail_empty()
2513 r = vhost_get_avail_idx(vq, &avail_idx); in vhost_vq_avail_empty()
2516 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_vq_avail_empty()
2518 return vq->avail_idx == vq->last_avail_idx; in vhost_vq_avail_empty()
2523 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_enable_notify() argument
2528 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) in vhost_enable_notify()
2530 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; in vhost_enable_notify()
2531 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_enable_notify()
2532 r = vhost_update_used_flags(vq); in vhost_enable_notify()
2534 vq_err(vq, "Failed to enable notification at %p: %d\n", in vhost_enable_notify()
2535 &vq->used->flags, r); in vhost_enable_notify()
2539 r = vhost_update_avail_event(vq, vq->avail_idx); in vhost_enable_notify()
2541 vq_err(vq, "Failed to update avail event index at %p: %d\n", in vhost_enable_notify()
2542 vhost_avail_event(vq), r); in vhost_enable_notify()
2549 r = vhost_get_avail_idx(vq, &avail_idx); in vhost_enable_notify()
2551 vq_err(vq, "Failed to check avail idx at %p: %d\n", in vhost_enable_notify()
2552 &vq->avail->idx, r); in vhost_enable_notify()
2556 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx; in vhost_enable_notify()
2561 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_disable_notify() argument
2565 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) in vhost_disable_notify()
2567 vq->used_flags |= VRING_USED_F_NO_NOTIFY; in vhost_disable_notify()
2568 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_disable_notify()
2569 r = vhost_update_used_flags(vq); in vhost_disable_notify()
2571 vq_err(vq, "Failed to enable notification at %p: %d\n", in vhost_disable_notify()
2572 &vq->used->flags, r); in vhost_disable_notify()
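vhost_enable_notify() and vhost_disable_notify() implement the usual race-free drain: disable guest kicks, consume until empty, re-enable, then re-read avail->idx, because the guest may have published buffers before the flag (or avail_event) became visible; a true return from enable means "more arrived, loop again". A single-threaded skeleton of that loop (all names illustrative; the real code keeps the flag in guest-visible memory and handles errors):

    #include <stdbool.h>
    #include <stdio.h>

    struct q { unsigned avail_idx, last_avail_idx; bool kicks_enabled; };

    static bool enable_notify(struct q *q)
    {
        q->kicks_enabled = true;
        return q->avail_idx != q->last_avail_idx;  /* re-check after enable */
    }

    static void drain(struct q *q)
    {
        do {
            q->kicks_enabled = false;
            while (q->last_avail_idx != q->avail_idx)
                q->last_avail_idx++;  /* consume one entry */
        } while (enable_notify(q));
    }

    int main(void)
    {
        struct q q = { .avail_idx = 3 };

        drain(&q);
        printf("consumed up to %u\n", q.last_avail_idx);
        return 0;
    }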
2578 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) in vhost_new_msg() argument
2586 node->vq = vq; in vhost_new_msg()