Lines matching refs: vrh

33 static inline int __vringh_get_head(const struct vringh *vrh,  in __vringh_get_head()  argument
34 int (*getu16)(const struct vringh *vrh, in __vringh_get_head() argument
41 err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx); in __vringh_get_head()
44 &vrh->vring.avail->idx); in __vringh_get_head()
49 return vrh->vring.num; in __vringh_get_head()
52 virtio_rmb(vrh->weak_barriers); in __vringh_get_head()
54 i = *last_avail_idx & (vrh->vring.num - 1); in __vringh_get_head()
56 err = getu16(vrh, &head, &vrh->vring.avail->ring[i]); in __vringh_get_head()
59 *last_avail_idx, &vrh->vring.avail->ring[i]); in __vringh_get_head()
63 if (head >= vrh->vring.num) { in __vringh_get_head()
65 head, vrh->vring.num); in __vringh_get_head()
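
The matches above cover the whole head-fetch path: compare the cached *last_avail_idx against avail->idx (returning vring.num when nothing is pending), issue virtio_rmb(), mask the index into the power-of-two ring, and bounds-check the head. A minimal userspace model of that logic, with plain uint16_t fields, no virtio endian handling, and all names invented:

    #include <stdint.h>

    struct avail_model {
        uint16_t idx;        /* driver's producer index */
        uint16_t ring[];     /* 'num' head indices */
    };

    /* Returns a head, 'num' when the ring is empty, or -1 for a bogus
     * head (the error path at line 63).  Mirrors __vringh_get_head(). */
    static int get_head_model(const struct avail_model *avail, uint16_t num,
                              uint16_t *last_avail_idx)
    {
        uint16_t head, i;

        if (*last_avail_idx == avail->idx)
            return num;                    /* nothing pending */

        /* The real code issues virtio_rmb() here so ring[] is read
         * only after the idx that published it. */
        i = *last_avail_idx & (num - 1);   /* num is a power of two */
        head = avail->ring[i];

        if (head >= num)
            return -1;                     /* corrupt head index */

        (*last_avail_idx)++;
        return head;
    }
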
108 static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len, in range_check() argument
114 if (!getrange(vrh, addr, range)) in range_check()
142 static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len, in no_range_check() argument
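
range_check() asks the caller-supplied getrange() callback whether a guest address is valid, and may shrink *len to fit the returned range; no_range_check() is the trusting kernel-side stand-in. A hedged sketch of what such a callback could look like; GUEST_BASE, GUEST_SIZE, and guest_mapping are placeholders invented for the sketch, not anything from the source:

    #include <linux/vringh.h>

    /* Hypothetical single-region callback.  'offset' is what gets added
     * to a guest address to reach our own mapping (see the
     * desc.addr + range.offset computation at line 356). */
    static bool example_getrange(struct vringh *vrh, u64 addr,
                                 struct vringh_range *r)
    {
        if (addr < GUEST_BASE || addr >= GUEST_BASE + GUEST_SIZE)
            return false;                  /* outside guest memory */

        r->start    = GUEST_BASE;
        r->end_incl = GUEST_BASE + GUEST_SIZE - 1;
        r->offset   = (u64)(unsigned long)guest_mapping - GUEST_BASE;
        return true;
    }
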
151 static int move_to_indirect(const struct vringh *vrh, in move_to_indirect() argument
164 len = vringh32_to_cpu(vrh, desc->len); in move_to_indirect()
171 if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) in move_to_indirect()
172 *up_next = vringh16_to_cpu(vrh, desc->next); in move_to_indirect()
209 static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next, in return_from_indirect() argument
215 *descs = vrh->vring.desc; in return_from_indirect()
216 *desc_max = vrh->vring.num; in return_from_indirect()
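
move_to_indirect() diverts the walk into an indirect descriptor table, saving desc->next in *up_next when VRING_DESC_F_NEXT is set and restarting the index at slot 0, while return_from_indirect() restores descs/desc_max to the main ring. The pair behaves like a one-level stack; a self-contained model with invented names:

    #include <stdint.h>

    #define F_NEXT 1u

    struct desc_model { uint64_t addr; uint32_t len; uint16_t flags, next; };

    struct walk {
        const struct desc_model *descs;  /* table currently being walked */
        unsigned int desc_max;           /* entries in that table */
        int up_next;                     /* saved main-ring 'next', or -1 */
    };

    /* Descend into an indirect table (move_to_indirect): remember where
     * to resume, then restart at slot 0 of the new table. */
    static uint16_t enter_indirect(struct walk *w, const struct desc_model *d,
                                   const struct desc_model *table)
    {
        w->up_next  = (d->flags & F_NEXT) ? d->next : -1;
        w->descs    = table;                   /* mapped from d->addr */
        w->desc_max = d->len / sizeof(*table); /* len validated, line 164 */
        return 0;                              /* new value for 'i' */
    }

    /* Pop back to the main ring (return_from_indirect). */
    static int leave_indirect(struct walk *w, const struct desc_model *ring,
                              unsigned int num)
    {
        int next = w->up_next;

        w->up_next  = -1;
        w->descs    = ring;
        w->desc_max = num;
        return next;                           /* resume index, or -1 */
    }
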
220 static int slow_copy(struct vringh *vrh, void *dst, const void *src, in slow_copy() argument
221 bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len, in slow_copy() argument
223 bool (*getrange)(struct vringh *vrh, in slow_copy() argument
226 bool (*getrange)(struct vringh *vrh, in slow_copy()
241 if (!rcheck(vrh, addr, &part, range, getrange)) in slow_copy()
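
slow_copy() handles a buffer that may straddle several valid regions: clamp each piece with rcheck()/getrange(), copy it, advance, repeat. A simplified model, with check() and the flat 'mem' backing standing in for the real callbacks and translated mappings:

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    /* Copy 'len' bytes that may cross region boundaries, re-validating
     * each piece.  check() may shrink *part to the current region. */
    static int slow_copy_model(uint64_t addr, void *dst, size_t len,
                               bool (*check)(uint64_t addr, size_t *part),
                               const uint8_t *mem)
    {
        while (len) {
            size_t part = len;

            if (!check(addr, &part))
                return -1;                 /* -EINVAL in the real code */

            memcpy(dst, mem + addr, part); /* real code calls xfer() */
            dst   = (uint8_t *)dst + part;
            addr += part;
            len  -= part;
        }
        return 0;
    }
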
256 __vringh_iov(struct vringh *vrh, u16 i, in __vringh_iov() argument
259 bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len, in __vringh_iov() argument
273 descs = vrh->vring.desc; in __vringh_iov()
274 desc_max = vrh->vring.num; in __vringh_iov()
291 err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange, in __vringh_iov()
299 cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) { in __vringh_iov()
300 u64 a = vringh64_to_cpu(vrh, desc.addr); in __vringh_iov()
303 len = vringh32_to_cpu(vrh, desc.len); in __vringh_iov()
304 if (!rcheck(vrh, a, &len, &range, getrange)) { in __vringh_iov()
309 if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) { in __vringh_iov()
316 err = move_to_indirect(vrh, &up_next, &i, addr, &desc, in __vringh_iov()
323 if (count++ == vrh->vring.num) { in __vringh_iov()
329 if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE)) in __vringh_iov()
350 len = vringh32_to_cpu(vrh, desc.len); in __vringh_iov()
351 if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range, in __vringh_iov()
356 addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) + in __vringh_iov()
369 if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) { in __vringh_iov()
370 desc.len = cpu_to_vringh32(vrh, in __vringh_iov()
371 vringh32_to_cpu(vrh, desc.len) - len); in __vringh_iov()
372 desc.addr = cpu_to_vringh64(vrh, in __vringh_iov()
373 vringh64_to_cpu(vrh, desc.addr) + len); in __vringh_iov()
377 if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) { in __vringh_iov()
378 i = vringh16_to_cpu(vrh, desc.next); in __vringh_iov()
382 i = return_from_indirect(vrh, &up_next, in __vringh_iov()
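
__vringh_iov() is the core of the file: starting from the head, it follows next links, detours through indirect tables, caps the iteration count at vring.num to defeat descriptor loops (line 323), sorts each buffer into riov or wiov by VRING_DESC_F_WRITE (line 329), and splits a descriptor whose tail failed the range check (lines 369-373). Its bare control flow, minus indirect tables, range splitting, and the read-before-write ordering check the real code also enforces, with invented types and output arrays the caller must size to 'num':

    #include <stdint.h>

    #define DF_NEXT  1u
    #define DF_WRITE 2u

    struct vdesc { uint64_t addr; uint32_t len; uint16_t flags, next; };
    struct seg   { uint64_t addr; uint32_t len; };

    /* Follow the chain from 'head', sorting each buffer by direction. */
    static int walk_chain(const struct vdesc *descs, unsigned int num,
                          uint16_t head,
                          struct seg *riov, unsigned *rn,
                          struct seg *wiov, unsigned *wn)
    {
        unsigned int count = 0;
        uint16_t i = head;

        for (;;) {
            const struct vdesc *d = &descs[i];

            if (count++ == num)
                return -1;                 /* chain too long: loop suspected */

            if (d->flags & DF_WRITE)
                wiov[(*wn)++] = (struct seg){ d->addr, d->len };
            else
                riov[(*rn)++] = (struct seg){ d->addr, d->len };

            if (!(d->flags & DF_NEXT))
                return 0;                  /* end of chain */
            i = d->next;
            if (i >= num)
                return -1;                 /* corrupt 'next' index */
        }
    }
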
402 static inline int __vringh_complete(struct vringh *vrh, in __vringh_complete() argument
405 int (*putu16)(const struct vringh *vrh, in __vringh_complete() argument
415 used_ring = vrh->vring.used; in __vringh_complete()
416 used_idx = vrh->last_used_idx + vrh->completed; in __vringh_complete()
418 off = used_idx % vrh->vring.num; in __vringh_complete()
421 if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) { in __vringh_complete()
422 u16 part = vrh->vring.num - off; in __vringh_complete()
437 virtio_wmb(vrh->weak_barriers); in __vringh_complete()
439 err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used); in __vringh_complete()
442 &vrh->vring.used->idx); in __vringh_complete()
446 vrh->completed += num_used; in __vringh_complete()
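
__vringh_complete() lands used entries at used_idx % num and, when a batch would run past the end of the ring (lines 421-422), splits it into two copies before virtio_wmb() publishes the new used->idx. A model of just the split, assuming memcpy-able entries:

    #include <stdint.h>
    #include <string.h>

    struct used_model { uint32_t id, len; };

    /* Place num_used entries starting at logical index used_idx,
     * wrapping a batch that crosses the end.  The real code then
     * virtio_wmb()s and stores the new used->idx, so the driver sees
     * the entries strictly before the index that covers them. */
    static void put_used_model(struct used_model *ring, uint16_t num,
                               uint16_t used_idx,
                               const struct used_model *ent, uint16_t num_used)
    {
        uint16_t off = used_idx % num;

        if (num_used > 1 && off + num_used >= num) {
            uint16_t part = num - off;

            memcpy(&ring[off], ent, part * sizeof(*ent));
            memcpy(&ring[0], ent + part, (num_used - part) * sizeof(*ent));
        } else {
            memcpy(&ring[off], ent, num_used * sizeof(*ent));
        }
    }
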
451 static inline int __vringh_need_notify(struct vringh *vrh, in __vringh_need_notify() argument
452 int (*getu16)(const struct vringh *vrh, in __vringh_need_notify() argument
463 virtio_mb(vrh->weak_barriers); in __vringh_need_notify()
466 if (!vrh->event_indices) { in __vringh_need_notify()
468 err = getu16(vrh, &flags, &vrh->vring.avail->flags); in __vringh_need_notify()
471 &vrh->vring.avail->flags); in __vringh_need_notify()
478 err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring)); in __vringh_need_notify()
481 &vring_used_event(&vrh->vring)); in __vringh_need_notify()
486 if (unlikely(vrh->completed > 0xffff)) in __vringh_need_notify()
490 vrh->last_used_idx + vrh->completed, in __vringh_need_notify()
491 vrh->last_used_idx); in __vringh_need_notify()
493 vrh->last_used_idx += vrh->completed; in __vringh_need_notify()
494 vrh->completed = 0; in __vringh_need_notify()
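
__vringh_need_notify() orders its reads with virtio_mb(), then either tests the legacy avail->flags bit or, with event indices, compares used_event against the window of entries completed since last_used_idx, finally folding vrh->completed into vrh->last_used_idx (lines 493-494). The window test is the standard vring_need_event() computation, modeled here:

    #include <stdint.h>
    #include <stdbool.h>

    /* vring_need_event(): notify iff event_idx fell inside the window
     * of entries published since old_idx (u16 arithmetic wraps safely). */
    static bool need_event(uint16_t event_idx, uint16_t new_idx,
                           uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event_idx - 1) <
               (uint16_t)(new_idx - old_idx);
    }

    /* Tail of __vringh_need_notify() with event indices enabled:
     * check the window, then absorb 'completed' into last_used_idx. */
    static bool need_notify_model(uint16_t used_event,
                                  uint16_t *last_used_idx, uint16_t *completed)
    {
        bool notify = need_event(used_event,
                                 *last_used_idx + *completed, *last_used_idx);

        *last_used_idx += *completed;
        *completed = 0;
        return notify;
    }
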
498 static inline bool __vringh_notify_enable(struct vringh *vrh, in __vringh_notify_enable() argument
499 int (*getu16)(const struct vringh *vrh, in __vringh_notify_enable() argument
501 int (*putu16)(const struct vringh *vrh, in __vringh_notify_enable() argument
506 if (!vrh->event_indices) { in __vringh_notify_enable()
508 if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) { in __vringh_notify_enable()
510 &vrh->vring.used->flags); in __vringh_notify_enable()
514 if (putu16(vrh, &vring_avail_event(&vrh->vring), in __vringh_notify_enable()
515 vrh->last_avail_idx) != 0) { in __vringh_notify_enable()
517 &vring_avail_event(&vrh->vring)); in __vringh_notify_enable()
524 virtio_mb(vrh->weak_barriers); in __vringh_notify_enable()
526 if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) { in __vringh_notify_enable()
528 &vrh->vring.avail->idx); in __vringh_notify_enable()
535 return avail == vrh->last_avail_idx; in __vringh_notify_enable()
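
__vringh_notify_enable() clears VRING_USED_F_NO_NOTIFY (or publishes last_avail_idx as the avail event), issues virtio_mb(), and re-reads avail->idx; it returns true only if no buffers raced in while notifications were off. A hedged sketch of the caller pattern that return value enables; process_one() is a hypothetical helper, not from the source:

    #include <linux/vringh.h>

    /* 'false' from vringh_notify_enable_kern() means work arrived
     * while kicks were disabled, so drain again instead of sleeping. */
    static void drain_then_idle(struct vringh *vrh)
    {
        for (;;) {
            while (process_one(vrh))
                ;
            if (vringh_notify_enable_kern(vrh))
                break;          /* ring verified empty: safe to wait */
            /* raced with the driver: loop and drain the new buffers */
        }
    }
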
538 static inline void __vringh_notify_disable(struct vringh *vrh, in __vringh_notify_disable() argument
539 int (*putu16)(const struct vringh *vrh, in __vringh_notify_disable() argument
542 if (!vrh->event_indices) { in __vringh_notify_disable()
544 if (putu16(vrh, &vrh->vring.used->flags, in __vringh_notify_disable()
547 &vrh->vring.used->flags); in __vringh_notify_disable()
553 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p) in getu16_user() argument
557 *val = vringh16_to_cpu(vrh, v); in getu16_user()
561 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val) in putu16_user() argument
563 __virtio16 v = cpu_to_vringh16(vrh, val); in putu16_user()
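
Only the lines mentioning vrh are matched, so both accessor bodies are clipped; a hedged reconstruction of what the full functions plausibly look like, namely get_user()/put_user() wrapped in virtio endian conversion:

    static inline int getu16_user(const struct vringh *vrh, u16 *val,
                                  const __virtio16 *p)
    {
        __virtio16 v = 0;
        int rc = get_user(v, (__force __virtio16 __user *)p);

        *val = vringh16_to_cpu(vrh, v);
        return rc;
    }

    static inline int putu16_user(const struct vringh *vrh, __virtio16 *p,
                                  u16 val)
    {
        __virtio16 v = cpu_to_vringh16(vrh, val);

        return put_user(v, (__force __virtio16 __user *)p);
    }
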
606 int vringh_init_user(struct vringh *vrh, u64 features, in vringh_init_user() argument
618 vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1)); in vringh_init_user()
619 vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX)); in vringh_init_user()
620 vrh->weak_barriers = weak_barriers; in vringh_init_user()
621 vrh->completed = 0; in vringh_init_user()
622 vrh->last_avail_idx = 0; in vringh_init_user()
623 vrh->last_used_idx = 0; in vringh_init_user()
624 vrh->vring.num = num; in vringh_init_user()
626 vrh->vring.desc = (__force struct vring_desc *)desc; in vringh_init_user()
627 vrh->vring.avail = (__force struct vring_avail *)avail; in vringh_init_user()
628 vrh->vring.used = (__force struct vring_used *)used; in vringh_init_user()
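
vringh_init_user() latches the endianness from VIRTIO_F_VERSION_1 and event indices from VIRTIO_RING_F_EVENT_IDX, zeroes the cursor state, and stashes the __user ring pointers. A hedged usage sketch; ring_mem, num, and the PAGE_SIZE alignment are assumptions for illustration:

    #include <linux/vringh.h>

    static int setup_ring(void *ring_mem, unsigned int num, struct vringh *vrh)
    {
        struct vring vr;

        /* Lay out desc/avail/used over the shared, page-aligned region
         * (num must be a power of two). */
        vring_init(&vr, num, ring_mem, PAGE_SIZE);

        return vringh_init_user(vrh,
                                (1ULL << VIRTIO_F_VERSION_1) |
                                (1ULL << VIRTIO_RING_F_EVENT_IDX),
                                num, true /* weak_barriers */,
                                (struct vring_desc __user *)vr.desc,
                                (struct vring_avail __user *)vr.avail,
                                (struct vring_used __user *)vr.used);
    }
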
650 int vringh_getdesc_user(struct vringh *vrh, in vringh_getdesc_user() argument
653 bool (*getrange)(struct vringh *vrh, in vringh_getdesc_user() argument
659 *head = vrh->vring.num; in vringh_getdesc_user()
660 err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx); in vringh_getdesc_user()
665 if (err == vrh->vring.num) in vringh_getdesc_user()
689 err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov, in vringh_getdesc_user()
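
vringh_getdesc_user() ties the pieces together: __vringh_get_head() picks the head (vring.num meaning "nothing pending", hence the err == vrh->vring.num test at line 665), then __vringh_iov() fills the iovecs. A hedged call sketch; the stack-backed iovec arrays and example_getrange (from the range_check sketch above) are placeholders:

    #include <linux/vringh.h>

    static int fetch_one(struct vringh *vrh)
    {
        struct iovec rstack[8], wstack[8];   /* arbitrary segment counts */
        struct vringh_iov riov, wiov;
        u16 head;
        int err;

        vringh_iov_init(&riov, rstack, ARRAY_SIZE(rstack));
        vringh_iov_init(&wiov, wstack, ARRAY_SIZE(wstack));

        err = vringh_getdesc_user(vrh, &riov, &wiov, example_getrange, &head);
        if (err <= 0)
            return err;          /* 0 means ring empty, <0 an error */

        /* ... vringh_iov_pull_user()/push_user(), then
         * vringh_complete_user(vrh, head, written) ... */
        return 1;
    }
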
738 void vringh_abandon_user(struct vringh *vrh, unsigned int num) in vringh_abandon_user() argument
742 vrh->last_avail_idx -= num; in vringh_abandon_user()
755 int vringh_complete_user(struct vringh *vrh, u16 head, u32 len) in vringh_complete_user() argument
759 used.id = cpu_to_vringh32(vrh, head); in vringh_complete_user()
760 used.len = cpu_to_vringh32(vrh, len); in vringh_complete_user()
761 return __vringh_complete(vrh, &used, 1, putu16_user, putused_user); in vringh_complete_user()
774 int vringh_complete_multi_user(struct vringh *vrh, in vringh_complete_multi_user() argument
778 return __vringh_complete(vrh, used, num_used, in vringh_complete_multi_user()
790 bool vringh_notify_enable_user(struct vringh *vrh) in vringh_notify_enable_user() argument
792 return __vringh_notify_enable(vrh, getu16_user, putu16_user); in vringh_notify_enable_user()
803 void vringh_notify_disable_user(struct vringh *vrh) in vringh_notify_disable_user() argument
805 __vringh_notify_disable(vrh, putu16_user); in vringh_notify_disable_user()
815 int vringh_need_notify_user(struct vringh *vrh) in vringh_need_notify_user() argument
817 return __vringh_need_notify(vrh, getu16_user); in vringh_need_notify_user()
822 static inline int getu16_kern(const struct vringh *vrh, in getu16_kern() argument
825 *val = vringh16_to_cpu(vrh, READ_ONCE(*p)); in getu16_kern()
829 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) in putu16_kern() argument
831 WRITE_ONCE(*p, cpu_to_vringh16(vrh, val)); in putu16_kern()
873 int vringh_init_kern(struct vringh *vrh, u64 features, in vringh_init_kern() argument
885 vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1)); in vringh_init_kern()
886 vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX)); in vringh_init_kern()
887 vrh->weak_barriers = weak_barriers; in vringh_init_kern()
888 vrh->completed = 0; in vringh_init_kern()
889 vrh->last_avail_idx = 0; in vringh_init_kern()
890 vrh->last_used_idx = 0; in vringh_init_kern()
891 vrh->vring.num = num; in vringh_init_kern()
892 vrh->vring.desc = desc; in vringh_init_kern()
893 vrh->vring.avail = avail; in vringh_init_kern()
894 vrh->vring.used = used; in vringh_init_kern()
916 int vringh_getdesc_kern(struct vringh *vrh, in vringh_getdesc_kern() argument
924 err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx); in vringh_getdesc_kern()
929 if (err == vrh->vring.num) in vringh_getdesc_kern()
933 err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL, in vringh_getdesc_kern()
979 void vringh_abandon_kern(struct vringh *vrh, unsigned int num) in vringh_abandon_kern() argument
983 vrh->last_avail_idx -= num; in vringh_abandon_kern()
996 int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len) in vringh_complete_kern() argument
1000 used.id = cpu_to_vringh32(vrh, head); in vringh_complete_kern()
1001 used.len = cpu_to_vringh32(vrh, len); in vringh_complete_kern()
1003 return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern); in vringh_complete_kern()
1014 bool vringh_notify_enable_kern(struct vringh *vrh) in vringh_notify_enable_kern() argument
1016 return __vringh_notify_enable(vrh, getu16_kern, putu16_kern); in vringh_notify_enable_kern()
1027 void vringh_notify_disable_kern(struct vringh *vrh) in vringh_notify_disable_kern() argument
1029 __vringh_notify_disable(vrh, putu16_kern); in vringh_notify_disable_kern()
1039 int vringh_need_notify_kern(struct vringh *vrh) in vringh_need_notify_kern() argument
1041 return __vringh_need_notify(vrh, getu16_kern); in vringh_need_notify_kern()
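
Taken together, the *_kern matches outline the whole host-side service loop: fetch a chain, read the request, write the reply, retire the chain, and kick the driver only if it asked for it. A hedged end-to-end sketch under that reading; handle_req() and notify_driver() are hypothetical stand-ins for device logic and interrupt injection:

    #include <linux/vringh.h>

    static int service_one(struct vringh *vrh)
    {
        struct kvec rkv[8], wkv[8];          /* arbitrary segment counts */
        struct vringh_kiov riov, wiov;
        u8 req[64], resp[64];
        ssize_t got, put;
        u16 head;
        int err;

        vringh_kiov_init(&riov, rkv, ARRAY_SIZE(rkv));
        vringh_kiov_init(&wiov, wkv, ARRAY_SIZE(wkv));

        err = vringh_getdesc_kern(vrh, &riov, &wiov, &head, GFP_KERNEL);
        if (err <= 0)
            return err;                      /* 0: ring empty, <0: error */

        got = vringh_iov_pull_kern(&riov, req, sizeof(req));
        if (got < 0)
            return got;

        put = vringh_iov_push_kern(&wiov, resp,
                                   handle_req(req, got, resp, sizeof(resp)));
        if (put < 0)
            return put;

        err = vringh_complete_kern(vrh, head, put);
        if (err)
            return err;

        if (vringh_need_notify_kern(vrh) > 0)
            notify_driver(vrh);              /* e.g. fire the vq interrupt */
        return 1;
    }
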