
Searched refs:vq (Results 1 – 25 of 174) sorted by relevance

/Linux-v5.4/drivers/virtio/
virtio_ring.c
20 dev_err(&(_vq)->vq.vdev->dev, \
21 "%s:"fmt, (_vq)->vq.name, ##args); \
29 (_vq)->vq.name, (_vq)->in_use); \
57 dev_err(&_vq->vq.vdev->dev, \
58 "%s:"fmt, (_vq)->vq.name, ##args); \
61 #define START_USE(vq) argument
62 #define END_USE(vq) argument
63 #define LAST_ADD_TIME_UPDATE(vq) argument
64 #define LAST_ADD_TIME_CHECK(vq) argument
65 #define LAST_ADD_TIME_INVALID(vq) argument
[all …]
virtio_mmio.c
97 struct virtqueue *vq; member
272 static bool vm_notify(struct virtqueue *vq) in vm_notify() argument
274 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); in vm_notify()
278 writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); in vm_notify()
303 ret |= vring_interrupt(irq, info->vq); in vm_interrupt()
312 static void vm_del_vq(struct virtqueue *vq) in vm_del_vq() argument
314 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); in vm_del_vq()
315 struct virtio_mmio_vq_info *info = vq->priv; in vm_del_vq()
317 unsigned int index = vq->index; in vm_del_vq()
332 vring_del_virtqueue(vq); in vm_del_vq()
[all …]
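
The virtio_mmio.c hits above show the MMIO transport's kick path: vm_notify() signals the device simply by writing the queue index to the VIRTIO_MMIO_QUEUE_NOTIFY register. A minimal sketch of such a notify callback is below; struct my_mmio_device and to_my_mmio_device() are illustrative stand-ins, not the driver's real names.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/virtio.h>
#include <linux/virtio_mmio.h>

struct my_mmio_device {
        struct virtio_device vdev;
        void __iomem *base;             /* ioremapped register window */
};

#define to_my_mmio_device(v) container_of(v, struct my_mmio_device, vdev)

/* Kick the device: a single 32-bit write of the queue index is the doorbell. */
static bool my_mmio_notify(struct virtqueue *vq)
{
        struct my_mmio_device *my_dev = to_my_mmio_device(vq->vdev);

        writel(vq->index, my_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
        return true;
}
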
virtio_pci_legacy.c
116 void (*callback)(struct virtqueue *vq), in setup_vq() argument
121 struct virtqueue *vq; in setup_vq() local
137 vq = vring_create_virtqueue(index, num, in setup_vq()
141 if (!vq) in setup_vq()
144 q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; in setup_vq()
156 vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY; in setup_vq()
167 return vq; in setup_vq()
172 vring_del_virtqueue(vq); in setup_vq()
178 struct virtqueue *vq = info->vq; in del_vq() local
179 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); in del_vq()
[all …]
virtio_pci_common.c
41 bool vp_notify(struct virtqueue *vq) in vp_notify() argument
45 iowrite16(vq->index, (void __iomem *)vq->priv); in vp_notify()
68 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) in vp_vring_interrupt()
175 void (*callback)(struct virtqueue *vq), in vp_setup_vq() argument
182 struct virtqueue *vq; in vp_setup_vq() local
189 vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx, in vp_setup_vq()
191 if (IS_ERR(vq)) in vp_setup_vq()
194 info->vq = vq; in vp_setup_vq()
204 return vq; in vp_setup_vq()
208 return vq; in vp_setup_vq()
[all …]
/Linux-v5.4/drivers/vhost/
vhost.c
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) argument
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) argument
57 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
59 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
62 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_big() argument
64 vq->user_be = true; in vhost_enable_cross_endian_big()
67 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_little() argument
69 vq->user_be = false; in vhost_enable_cross_endian_little()
72 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
76 if (vq->private_data) in vhost_set_vring_endian()
[all …]
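
The two macros at vhost.c lines 49–50 locate the event-index words that only exist when VIRTIO_RING_F_EVENT_IDX is negotiated: used_event is the extra __virtio16 slot directly after the available ring (&avail->ring[num]), and avail_event is the slot after the used ring (&used->ring[num]). A sketch of that split-ring tail layout for an example queue size follows; the example_* names are illustrative, the real definitions being struct vring_avail/vring_used in <uapi/linux/virtio_ring.h>.

#include <linux/virtio_types.h>

#define EXAMPLE_QUEUE_NUM 256           /* illustrative queue size */

/* Driver-written ring; the device side (here: vhost) reads used_event to
 * decide when to interrupt the driver. vhost_used_event() points here. */
struct example_avail {
        __virtio16 flags;
        __virtio16 idx;
        __virtio16 ring[EXAMPLE_QUEUE_NUM];
        __virtio16 used_event;          /* i.e. &ring[num] */
};

/* Device-written ring; the driver reads avail_event to decide when to kick
 * the device. vhost_avail_event() points here. */
struct example_used_elem {
        __virtio32 id;
        __virtio32 len;
};

struct example_used {
        __virtio16 flags;
        __virtio16 idx;
        struct example_used_elem ring[EXAMPLE_QUEUE_NUM];
        __virtio16 avail_event;         /* i.e. &ring[num] */
};
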
net.c
97 struct vhost_virtqueue *vq; member
108 struct vhost_virtqueue vq; member
228 static void vhost_net_enable_zcopy(int vq) in vhost_net_enable_zcopy() argument
230 vhost_net_zcopy_mask |= 0x1 << vq; in vhost_net_enable_zcopy()
234 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) in vhost_net_ubuf_alloc() argument
245 ubufs->vq = vq; in vhost_net_ubuf_alloc()
359 struct vhost_virtqueue *vq) in vhost_zerocopy_signal_used() argument
362 container_of(vq, struct vhost_net_virtqueue, vq); in vhost_zerocopy_signal_used()
367 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) in vhost_zerocopy_signal_used()
369 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { in vhost_zerocopy_signal_used()
[all …]
vsock.c
85 struct vhost_virtqueue *vq) in vhost_transport_do_send_pkt() argument
92 mutex_lock(&vq->mutex); in vhost_transport_do_send_pkt()
94 if (!vq->private_data) in vhost_transport_do_send_pkt()
98 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
111 vhost_enable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
120 head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), in vhost_transport_do_send_pkt()
129 if (head == vq->num) { in vhost_transport_do_send_pkt()
137 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_transport_do_send_pkt()
138 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
146 vq_err(vq, "Expected 0 output buffers, got %u\n", out); in vhost_transport_do_send_pkt()
[all …]
test.c
45 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; in handle_vq() local
51 mutex_lock(&vq->mutex); in handle_vq()
52 private = vq->private_data; in handle_vq()
54 mutex_unlock(&vq->mutex); in handle_vq()
58 vhost_disable_notify(&n->dev, vq); in handle_vq()
61 head = vhost_get_vq_desc(vq, vq->iov, in handle_vq()
62 ARRAY_SIZE(vq->iov), in handle_vq()
69 if (head == vq->num) { in handle_vq()
70 if (unlikely(vhost_enable_notify(&n->dev, vq))) { in handle_vq()
71 vhost_disable_notify(&n->dev, vq); in handle_vq()
[all …]
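
The test.c fragments show the canonical vhost worker loop: take the vq mutex, check that a backend is attached via private_data, disable guest notifications, pull descriptors with vhost_get_vq_desc() until it returns vq->num (ring empty), then re-enable notifications and re-check before stopping. A condensed, hedged sketch of that shape (error handling trimmed, assumes the in-kernel vhost core from drivers/vhost/vhost.h):

/* Sketch of the usual vhost request loop; compare handle_vq() above. */
static void my_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        unsigned int out, in;
        int head;

        mutex_lock(&vq->mutex);
        if (!vq->private_data) {                /* no backend attached */
                mutex_unlock(&vq->mutex);
                return;
        }

        vhost_disable_notify(dev, vq);
        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;                  /* error */
                if (head == vq->num) {          /* ring is empty */
                        if (unlikely(vhost_enable_notify(dev, vq))) {
                                /* new buffers raced in; keep polling */
                                vhost_disable_notify(dev, vq);
                                continue;
                        }
                        break;
                }
                /* ... process vq->iov[0 .. out + in) here ... */
                vhost_add_used_and_signal(dev, vq, head, 0);
        }
        mutex_unlock(&vq->mutex);
}
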
scsi.c
180 struct vhost_virtqueue vq; member
243 struct vhost_virtqueue *vq; in vhost_scsi_init_inflight() local
247 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
249 mutex_lock(&vq->mutex); in vhost_scsi_init_inflight()
262 mutex_unlock(&vq->mutex); in vhost_scsi_init_inflight()
267 vhost_scsi_get_inflight(struct vhost_virtqueue *vq) in vhost_scsi_get_inflight() argument
272 svq = container_of(vq, struct vhost_scsi_virtqueue, vq); in vhost_scsi_get_inflight()
410 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt() local
420 vq_err(vq, "Failed to allocate vhost_scsi_evt\n"); in vhost_scsi_allocate_evt()
425 evt->event.event = cpu_to_vhost32(vq, event); in vhost_scsi_allocate_evt()
[all …]
vhost.h
155 struct vhost_virtqueue *vq; member
178 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
190 bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
212 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
215 int vq_meta_prefetch(struct vhost_virtqueue *vq);
217 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
231 #define vq_err(vq, fmt, ...) do { \ argument
233 if ((vq)->error_ctx) \
234 eventfd_signal((vq)->error_ctx, 1);\
246 static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) in vhost_has_feature() argument
[all …]
/Linux-v5.4/arch/arm64/include/uapi/asm/
sve_context.h
31 #define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES) argument
33 #define __SVE_ZREG_SIZE(vq) ((__u32)(vq) * __SVE_VQ_BYTES) argument
34 #define __SVE_PREG_SIZE(vq) ((__u32)(vq) * (__SVE_VQ_BYTES / 8)) argument
35 #define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq) argument
38 #define __SVE_ZREG_OFFSET(vq, n) \ argument
39 (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
40 #define __SVE_ZREGS_SIZE(vq) \ argument
41 (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
43 #define __SVE_PREGS_OFFSET(vq) \ argument
44 (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
[all …]
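
In these macros vq is the vector length in quadwords (128-bit units) and __SVE_VQ_BYTES is 16, so every size scales linearly with vq: a Z register takes vq * 16 bytes and a P register (one predicate bit per vector byte) takes vq * 2 bytes. A small worked example, assuming the arm64 UAPI header quoted above is installed:

/* Userspace, arm64 only: print SVE register sizes for vq = 4 (512-bit vectors). */
#include <stdio.h>
#include <asm/sve_context.h>

int main(void)
{
        unsigned int vq = 4;    /* illustrative value */

        printf("VL         = %u bytes\n", __sve_vl_from_vq(vq));       /* 64 */
        printf("Z reg size = %u bytes\n", __SVE_ZREG_SIZE(vq));        /* 64 */
        printf("P reg size = %u bytes\n", __SVE_PREG_SIZE(vq));        /* 8 */
        printf("FFR size   = %u bytes\n", __SVE_FFR_SIZE(vq));         /* 8 */
        return 0;
}
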
ptrace.h
155 #define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state)) argument
186 #define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) argument
187 #define SVE_PT_SVE_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) argument
188 #define SVE_PT_SVE_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) argument
196 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ argument
197 (SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
198 #define SVE_PT_SVE_ZREGS_SIZE(vq) \ argument
199 (SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
201 #define SVE_PT_SVE_PREGS_OFFSET(vq) \ argument
202 (SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
[all …]
sigcontext.h
165 #define sve_vl_from_vq(vq) __sve_vl_from_vq(vq) argument
223 #define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) argument
224 #define SVE_SIG_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) argument
225 #define SVE_SIG_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) argument
233 #define SVE_SIG_ZREG_OFFSET(vq, n) \ argument
234 (SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
235 #define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq) argument
237 #define SVE_SIG_PREGS_OFFSET(vq) \ argument
238 (SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
239 #define SVE_SIG_PREG_OFFSET(vq, n) \ argument
[all …]
/Linux-v5.4/include/linux/
virtio.h
30 void (*callback)(struct virtqueue *vq);
38 int virtqueue_add_outbuf(struct virtqueue *vq,
43 int virtqueue_add_inbuf(struct virtqueue *vq,
48 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
54 int virtqueue_add_sgs(struct virtqueue *vq,
61 bool virtqueue_kick(struct virtqueue *vq);
63 bool virtqueue_kick_prepare(struct virtqueue *vq);
65 bool virtqueue_notify(struct virtqueue *vq);
67 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
69 void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
[all …]
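
These declarations are the core driver-side virtqueue API: map a buffer into a scatterlist, queue it with virtqueue_add_outbuf()/virtqueue_add_inbuf(), kick the device, and reclaim it later with virtqueue_get_buf(), typically from the vq callback. A minimal sketch of that round trip for one outgoing buffer; the my_* names are assumptions, not part of the header above:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Queue one device-readable buffer and notify the device (sketch). */
static int my_send(struct virtqueue *vq, void *buf, unsigned int len)
{
        struct scatterlist sg;
        int err;

        sg_init_one(&sg, buf, len);
        err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
        if (err)
                return err;             /* e.g. -ENOSPC when the ring is full */

        if (!virtqueue_kick(vq))        /* false if the queue is broken */
                return -EIO;
        return 0;
}

/* Completion callback: reclaim buffers the device has finished with (sketch). */
static void my_done(struct virtqueue *vq)
{
        unsigned int len;
        void *buf;

        while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                my_free_buf(buf, len);  /* hypothetical helper */
}
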
/Linux-v5.4/tools/virtio/linux/
virtio.h
20 void (*callback)(struct virtqueue *vq);
29 int virtqueue_add_sgs(struct virtqueue *vq,
36 int virtqueue_add_outbuf(struct virtqueue *vq,
41 int virtqueue_add_inbuf(struct virtqueue *vq,
46 bool virtqueue_kick(struct virtqueue *vq);
48 void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
50 void virtqueue_disable_cb(struct virtqueue *vq);
52 bool virtqueue_enable_cb(struct virtqueue *vq);
53 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
55 void *virtqueue_detach_unused_buf(struct virtqueue *vq);
[all …]
/Linux-v5.4/drivers/gpu/drm/virtio/
virtgpu_trace.h
12 TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
13 TP_ARGS(vq, hdr),
16 __field(unsigned int, vq)
24 __entry->dev = vq->vdev->index;
25 __entry->vq = vq->index;
26 __entry->name = vq->name;
33 __entry->dev, __entry->vq, __entry->name,
39 TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
40 TP_ARGS(vq, hdr)
44 TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
[all …]
/Linux-v5.4/tools/virtio/
virtio_test.c
32 struct virtqueue *vq; member
46 bool vq_notify(struct virtqueue *vq) in vq_notify() argument
48 struct vq_info *info = vq->priv; in vq_notify()
56 void vq_callback(struct virtqueue *vq) in vq_callback() argument
102 info->vq = vring_new_virtqueue(info->idx, in vq_info_add()
106 assert(info->vq); in vq_info_add()
107 info->vq->priv = info; in vq_info_add()
154 static void run_test(struct vdev_info *dev, struct vq_info *vq, in run_test() argument
166 virtqueue_disable_cb(vq->vq); in run_test()
171 r = virtqueue_add_outbuf(vq->vq, &sl, 1, in run_test()
[all …]
vringh_test.c
27 static bool never_notify_host(struct virtqueue *vq) in never_notify_host() argument
32 static void never_callback_guest(struct virtqueue *vq) in never_callback_guest() argument
70 static bool parallel_notify_host(struct virtqueue *vq) in parallel_notify_host() argument
75 gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev); in parallel_notify_host()
83 static bool no_notify_host(struct virtqueue *vq) in no_notify_host() argument
294 struct virtqueue *vq; in parallel_test() local
317 vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true, in parallel_test()
334 while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) { in parallel_test()
380 err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf, in parallel_test()
383 err = virtqueue_add_inbuf(vq, sg, num_sg, in parallel_test()
[all …]
/Linux-v5.4/net/vmw_vsock/
virtio_transport.c
135 struct virtqueue *vq; in virtio_transport_send_pkt_work() local
144 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_send_pkt_work()
174 ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL); in virtio_transport_send_pkt_work()
200 virtqueue_kick(vq); in virtio_transport_send_pkt_work()
294 struct virtqueue *vq; in virtio_vsock_rx_fill() local
297 vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_vsock_rx_fill()
318 ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL); in virtio_vsock_rx_fill()
324 } while (vq->num_free); in virtio_vsock_rx_fill()
327 virtqueue_kick(vq); in virtio_vsock_rx_fill()
334 struct virtqueue *vq; in virtio_transport_tx_work() local
[all …]
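
virtio_transport_send_pkt_work() uses the multi-element form of the API: virtqueue_add_sgs() takes an array of scatterlist pointers, where the first out_sg entries are readable by the device and the remaining in_sg entries are writable by it. A hedged sketch of queueing a header plus payload as two device-readable elements; the names and packet layout are illustrative, not vsock's:

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Queue a two-part (header + payload) device-readable buffer (sketch). */
static int my_queue_pkt(struct virtqueue *vq, void *hdr, size_t hdr_len,
                        void *payload, size_t payload_len, void *cookie)
{
        struct scatterlist hdr_sg, data_sg, *sgs[2];
        int ret;

        sg_init_one(&hdr_sg, hdr, hdr_len);
        sg_init_one(&data_sg, payload, payload_len);
        sgs[0] = &hdr_sg;
        sgs[1] = &data_sg;

        /* 2 out_sgs, 0 in_sgs: both elements are read by the device. */
        ret = virtqueue_add_sgs(vq, sgs, 2, 0, cookie, GFP_KERNEL);
        if (ret)
                return ret;

        virtqueue_kick(vq);             /* doorbell; callers may batch kicks */
        return 0;
}
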
/Linux-v5.4/arch/arm64/kvm/
guest.c
209 #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64) argument
210 #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64) argument
211 #define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq))) argument
215 unsigned int max_vq, vq; in get_sve_vls() local
227 for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) in get_sve_vls()
228 if (sve_vq_available(vq)) in get_sve_vls()
229 vqs[vq_word(vq)] |= vq_mask(vq); in get_sve_vls()
239 unsigned int max_vq, vq; in set_sve_vls() local
255 for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq) in set_sve_vls()
256 if (vq_present(vqs, vq)) in set_sve_vls()
[all …]
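
get_sve_vls()/set_sve_vls() encode the set of supported SVE vector lengths as a bitmap of vq values: vq_word()/vq_mask() map a given vq to a 64-bit word and bit within the vqs[] array, offset by SVE_VQ_MIN (which is 1). A small userspace demo of the same encoding, assuming the macro definitions quoted above:

#include <stdint.h>
#include <stdio.h>

#define SVE_VQ_MIN 1    /* matches <asm/sigcontext.h> on arm64 */

#define vq_word(vq)     (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq)     ((uint64_t)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

int main(void)
{
        uint64_t vqs[8] = { 0 };        /* 8 words cover vq = 1 .. 512 */

        /* Mark vq = 1 (128-bit) and vq = 4 (512-bit) as available. */
        vqs[vq_word(1)] |= vq_mask(1);  /* word 0, bit 0 */
        vqs[vq_word(4)] |= vq_mask(4);  /* word 0, bit 3 */

        printf("vq 1: %d, vq 2: %d, vq 4: %d\n",
               vq_present(vqs, 1), vq_present(vqs, 2), vq_present(vqs, 4));
        return 0;
}
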
/Linux-v5.4/drivers/staging/media/sunxi/cedrus/
cedrus_video.c
290 struct vb2_queue *vq; in cedrus_s_fmt_vid_cap() local
293 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); in cedrus_s_fmt_vid_cap()
294 if (vb2_is_busy(vq)) in cedrus_s_fmt_vid_cap()
312 struct vb2_queue *vq; in cedrus_s_fmt_vid_out() local
315 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); in cedrus_s_fmt_vid_out()
316 if (vb2_is_busy(vq)) in cedrus_s_fmt_vid_out()
362 static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs, in cedrus_queue_setup() argument
366 struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); in cedrus_queue_setup()
371 if (V4L2_TYPE_IS_OUTPUT(vq->type)) { in cedrus_queue_setup()
394 static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state) in cedrus_queue_cleanup() argument
[all …]
/Linux-v5.4/drivers/scsi/
virtio_scsi.c
67 struct virtqueue *vq; member
178 struct virtqueue *vq = virtscsi_vq->vq; in virtscsi_vq_done() local
182 virtqueue_disable_cb(vq); in virtscsi_vq_done()
183 while ((buf = virtqueue_get_buf(vq, &len)) != NULL) in virtscsi_vq_done()
186 if (unlikely(virtqueue_is_broken(vq))) in virtscsi_vq_done()
188 } while (!virtqueue_enable_cb(vq)); in virtscsi_vq_done()
192 static void virtscsi_req_done(struct virtqueue *vq) in virtscsi_req_done() argument
194 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); in virtscsi_req_done()
196 int index = vq->index - VIRTIO_SCSI_VQ_BASE; in virtscsi_req_done()
220 static void virtscsi_ctrl_done(struct virtqueue *vq) in virtscsi_ctrl_done() argument
[all …]
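
virtscsi_vq_done() is the standard completion-side pattern: disable further callbacks, drain everything with virtqueue_get_buf(), re-enable callbacks, and loop once more in case a completion slipped in during the race window, bailing out if the queue is broken. A hedged sketch of that drain loop in isolation; my_complete() is a hypothetical per-buffer handler:

/* Drain a virtqueue's completions without losing the race with new ones
 * (sketch of the pattern in virtscsi_vq_done() above). */
static void my_drain_vq(struct virtqueue *vq)
{
        unsigned int len;
        void *buf;

        do {
                virtqueue_disable_cb(vq);
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        my_complete(buf, len);  /* hypothetical handler */
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));     /* true == nothing pending */
}
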
/Linux-v5.4/drivers/remoteproc/
remoteproc_virtio.c
27 static bool rproc_virtio_notify(struct virtqueue *vq) in rproc_virtio_notify() argument
29 struct rproc_vring *rvring = vq->priv; in rproc_virtio_notify()
58 if (!rvring || !rvring->vq) in rproc_vq_interrupt()
61 return vring_interrupt(0, rvring->vq); in rproc_vq_interrupt()
67 void (*callback)(struct virtqueue *vq), in rp_find_vq() argument
76 struct virtqueue *vq; in rp_find_vq() local
108 vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx, in rp_find_vq()
110 if (!vq) { in rp_find_vq()
116 rvring->vq = vq; in rp_find_vq()
117 vq->priv = rvring; in rp_find_vq()
[all …]
/Linux-v5.4/drivers/char/hw_random/
virtio-rng.c
19 struct virtqueue *vq; member
29 static void random_recv_done(struct virtqueue *vq) in random_recv_done() argument
31 struct virtrng_info *vi = vq->vdev->priv; in random_recv_done()
34 if (!virtqueue_get_buf(vi->vq, &vi->data_avail)) in random_recv_done()
48 virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL); in register_buffer()
50 virtqueue_kick(vi->vq); in register_buffer()
114 vi->vq = virtio_find_single_vq(vdev, random_recv_done, "input"); in probe_common()
115 if (IS_ERR(vi->vq)) { in probe_common()
116 err = PTR_ERR(vi->vq); in probe_common()
/Linux-v5.4/fs/fuse/
virtio_fs.c
30 struct virtqueue *vq; /* protected by ->lock */ member
61 static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq) in vq_to_fsvq() argument
63 struct virtio_fs *fs = vq->vdev->priv; in vq_to_fsvq()
65 return &fs->vqs[vq->index]; in vq_to_fsvq()
68 static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq) in vq_to_fpq() argument
70 return &vq_to_fsvq(vq)->fud->pq; in vq_to_fpq()
242 struct virtqueue *vq = fsvq->vq; in virtio_fs_hiprio_done_work() local
250 virtqueue_disable_cb(vq); in virtio_fs_hiprio_done_work()
252 while ((req = virtqueue_get_buf(vq, &len)) != NULL) { in virtio_fs_hiprio_done_work()
256 } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); in virtio_fs_hiprio_done_work()
[all …]
