/Linux-v5.4/drivers/virtio/ |
D | virtio_pci_common.c |
    203  vp_dev->vqs[index] = info; in vp_setup_vq()
    214  struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; in vp_del_vq()
    232  list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in vp_del_vqs()
    234  int v = vp_dev->vqs[vq->index]->msix_vector; in vp_del_vqs()
    275  kfree(vp_dev->vqs); in vp_del_vqs()
    276  vp_dev->vqs = NULL; in vp_del_vqs()
    280  struct virtqueue *vqs[], vq_callback_t *callbacks[], in vp_find_vqs_msix() argument
    289  vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); in vp_find_vqs_msix()
    290  if (!vp_dev->vqs) in vp_find_vqs_msix()
    313  vqs[i] = NULL; in vp_find_vqs_msix()
    [all …]
|
D | virtio_pci_common.h |
    80   struct virtio_pci_vq_info **vqs; member
    131  struct virtqueue *vqs[], vq_callback_t *callbacks[],
|
D | virtio_mmio.c |
    342  list_for_each_entry_safe(vq, n, &vdev->vqs, list) in vm_del_vqs()
    459  struct virtqueue *vqs[], in vm_find_vqs() argument
    481  vqs[i] = NULL; in vm_find_vqs()
    485  vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], in vm_find_vqs()
    487  if (IS_ERR(vqs[i])) { in vm_find_vqs()
    489  return PTR_ERR(vqs[i]); in vm_find_vqs()
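The vm_find_vqs() matches above are the transport-side half of virtqueue setup: skip entries whose name is NULL, create the rest in order, and unwind on the first error. A minimal sketch of that loop, under the assumption that xx_setup_vq() and xx_del_vqs() are hypothetical stand-ins for the transport's own helpers (vm_setup_vq()/vm_del_vqs() here):

/*
 * Sketch of the transport-side find_vqs loop. xx_setup_vq() and
 * xx_del_vqs() are hypothetical helpers, assumed defined elsewhere.
 */
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static struct virtqueue *xx_setup_vq(struct virtio_device *vdev,
				     unsigned int index,
				     vq_callback_t *callback,
				     const char *name, bool ctx);
static void xx_del_vqs(struct virtio_device *vdev);

static int xx_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	int queue_idx = 0;
	unsigned int i;

	for (i = 0; i < nvqs; i++) {
		if (!names[i]) {	/* caller does not want this queue */
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = xx_setup_vq(vdev, queue_idx++, callbacks[i],
				     names[i], ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			xx_del_vqs(vdev);	/* unwind queues created so far */
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}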
|
D | virtio_input.c |
    171  struct virtqueue *vqs[2]; in virtinput_init_vqs() local
    177  err = virtio_find_vqs(vi->vdev, 2, vqs, cbs, names, NULL); in virtinput_init_vqs()
    180  vi->evt = vqs[0]; in virtinput_init_vqs()
    181  vi->sts = vqs[1]; in virtinput_init_vqs()
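virtio_input.c shows the matching driver-side call: one virtio_find_vqs() request fills a small on-stack array in the same order as the names passed in. A minimal sketch of that pattern; my_dev, the callback functions and the queue names are hypothetical:

/* Sketch of a two-queue driver setup; my_* names are illustrative only. */
#include <linux/virtio.h>
#include <linux/virtio_config.h>

struct my_dev {
	struct virtio_device *vdev;
	struct virtqueue *evt, *sts;
};

static void my_evt_done(struct virtqueue *vq) { /* handle completed event buffers */ }
static void my_sts_done(struct virtqueue *vq) { /* handle completed status buffers */ }

static int my_init_vqs(struct my_dev *d)
{
	struct virtqueue *vqs[2];
	vq_callback_t *cbs[] = { my_evt_done, my_sts_done };
	static const char * const names[] = { "events", "status" };
	int err;

	/* One call negotiates both queues; vqs[] is filled in names[] order. */
	err = virtio_find_vqs(d->vdev, 2, vqs, cbs, names, NULL);
	if (err)
		return err;

	d->evt = vqs[0];
	d->sts = vqs[1];
	return 0;
}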
|
/Linux-v5.4/drivers/vhost/ |
D | test.c |
    38   struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX]; member
    45   struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; in handle_vq()
    109  struct vhost_virtqueue **vqs; in vhost_test_open() local
    113  vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL); in vhost_test_open()
    114  if (!vqs) { in vhost_test_open()
    120  vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; in vhost_test_open()
    121  n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; in vhost_test_open()
    122  vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV, in vhost_test_open()
    144  *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ); in vhost_test_stop()
    149  vhost_poll_flush(&n->vqs[index].poll); in vhost_test_flush_vq()
    [all …]
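The vhost_test_open() matches show how a vhost backend wires its queues: the vhost_virtqueue structures are embedded in the device, a separate array of pointers to them is built, a handle_kick callback is installed, and everything is handed to vhost_dev_init(). A condensed sketch of that shape; the my_* names are hypothetical, and the two trailing weight arguments (work allowed per kick, by request count and by bytes) are an assumption about the v5.4 vhost_dev_init() signature, with placeholder values:

/* Sketch of a minimal vhost backend open path (hypothetical my_* names). */
#include <linux/slab.h>
#include <linux/uio.h>
#include "vhost.h"	/* drivers/vhost/vhost.h */

#define MY_VQ		0
#define MY_VQ_MAX	1

struct my_backend {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[MY_VQ_MAX];	/* queues embedded in the device */
};

static void my_handle_kick(struct vhost_work *work);	/* assumed defined elsewhere */

static int my_backend_open(struct my_backend **out)
{
	struct my_backend *b;
	struct vhost_virtqueue **vqs;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	/* vhost_dev_init() takes an array of pointers into the embedded vqs[]. */
	vqs = kmalloc_array(MY_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(b);
		return -ENOMEM;
	}

	vqs[MY_VQ] = &b->vqs[MY_VQ];
	b->vqs[MY_VQ].handle_kick = my_handle_kick;

	/*
	 * Assumed v5.4 signature: dev, vqs, nvqs, iov_limit, then request-count
	 * and byte-count weight limits per kick (placeholder values here).
	 */
	vhost_dev_init(&b->dev, vqs, MY_VQ_MAX, UIO_MAXIOV, 256, 0x80000);

	*out = b;
	return 0;
}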
|
D | net.c |
    133  struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; member
    274  kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
    275  n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
    288  n->vqs[i].ubuf_info = in vhost_net_set_ubuf_info()
    290  sizeof(*n->vqs[i].ubuf_info), in vhost_net_set_ubuf_info()
    292  if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
    309  n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
    310  n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
    311  n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
    312  n->vqs[i].vhost_hlen = 0; in vhost_net_vq_reset()
    [all …]
|
D | vsock.c |
    42   struct vhost_virtqueue vqs[2]; member
    87   struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
    234  vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
    300  struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
    378  struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
    481  for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
    482  vq = &vsock->vqs[i]; in vhost_vsock_start()
    508  for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
    509  vq = &vsock->vqs[i]; in vhost_vsock_start()
    531  for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_stop()
    [all …]
|
D | scsi.c |
    200  struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ]; member
    247  vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
    252  idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
    254  old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
    257  vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
    258  new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
    410  struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
    449  struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
    500  struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
    554  vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
    [all …]
|
D | vhost.c |
    297  __vhost_vq_meta_reset(d->vqs[i]); in vhost_vq_meta_reset()
    387  vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
    402  vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_alloc_iovecs()
    411  vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_free_iovecs()
    456  struct vhost_virtqueue **vqs, int nvqs, in vhost_dev_init() argument
    462  dev->vqs = vqs; in vhost_dev_init()
    481  vq = dev->vqs[i]; in vhost_dev_init()
    599  dev->vqs[i]->umem = umem; in vhost_dev_reset_owner()
    608  if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) { in vhost_dev_stop()
    609  vhost_poll_stop(&dev->vqs[i]->poll); in vhost_dev_stop()
    [all …]
|
/Linux-v5.4/fs/fuse/ |
D | virtio_fs.c |
    46   struct virtio_fs_vq *vqs; member
    65   return &fs->vqs[vq->index]; in vq_to_fsvq()
    90   kfree(vfs->vqs); in release_virtio_fs_obj()
    135  fsvq = &fs->vqs[i]; in virtio_fs_drain_all_queues()
    146  fsvq = &fs->vqs[i]; in virtio_fs_start_all_queues()
    203  struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_free_devs()
    525  struct virtqueue **vqs; in virtio_fs_setup_vqs() local
    537  fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
    538  if (!fs->vqs) in virtio_fs_setup_vqs()
    541  vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
    [all …]
|
/Linux-v5.4/drivers/block/ |
D | virtio_blk.c |
    55   struct virtio_blk_vq *vqs; member
    238  spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
    241  while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
    254  spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
    260  struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
    335  spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
    337  err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num); in virtio_queue_rq()
    339  err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); in virtio_queue_rq()
    341  virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
    343  spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
    [all …]
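virtio_blk pairs each hardware queue with its own spinlock and drains completions under it, as the virtblk_done() matches show. A minimal sketch of that completion loop; my_blk_vq and req_done() are hypothetical:

/* Sketch of a per-queue completion drain (hypothetical my_* names). */
#include <linux/spinlock.h>
#include <linux/virtio.h>

struct my_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;	/* serializes submit and completion on this queue */
};

static void req_done(void *req, unsigned int len);	/* assumed defined elsewhere */

static void my_queue_done(struct my_blk_vq *q)
{
	unsigned long flags;
	unsigned int len;
	void *req;

	spin_lock_irqsave(&q->lock, flags);
	/* virtqueue_get_buf() hands back the cookie given at submit time. */
	while ((req = virtqueue_get_buf(q->vq, &len)) != NULL)
		req_done(req, len);
	spin_unlock_irqrestore(&q->lock, flags);
}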
|
/Linux-v5.4/net/vmw_vsock/ |
D | virtio_transport.c |
    30   struct virtqueue *vqs[VSOCK_VQ_MAX]; member
    144  vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_send_pkt_work()
    186  struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_send_pkt_work()
    273  struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_cancel_pkt()
    297  vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_vsock_rx_fill()
    337  vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_tx_work()
    364  struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_more_replies()
    379  vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_rx_work()
    433  vq = vsock->vqs[VSOCK_VQ_EVENT]; in virtio_vsock_event_fill_one()
    451  virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); in virtio_vsock_event_fill()
    [all …]
|
/Linux-v5.4/drivers/crypto/virtio/ |
D | virtio_crypto_core.c |
    51   struct virtqueue **vqs; in virtcrypto_find_vqs() local
    65   vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); in virtcrypto_find_vqs()
    66   if (!vqs) in virtcrypto_find_vqs()
    87   ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); in virtcrypto_find_vqs()
    91   vi->ctrl_vq = vqs[total_vqs - 1]; in virtcrypto_find_vqs()
    95   vi->data_vq[i].vq = vqs[i]; in virtcrypto_find_vqs()
    106  kfree(vqs); in virtcrypto_find_vqs()
    116  kfree(vqs); in virtcrypto_find_vqs()
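virtcrypto_find_vqs() is the variable-count version of the driver-side setup: three parallel arrays (queues, callbacks, names) sized at runtime, with the control queue requested last so the data queues stay at indices 0..N-1. A condensed sketch under those assumptions, with hypothetical data_done()/ctrl_done() callbacks:

/* Sketch of a runtime-sized virtio_find_vqs() call (hypothetical my_* names). */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void data_done(struct virtqueue *vq);	/* assumed defined elsewhere */
static void ctrl_done(struct virtqueue *vq);	/* assumed defined elsewhere */

static int my_find_vqs(struct virtio_device *vdev, unsigned int ndata,
		       struct virtqueue **data_vqs, struct virtqueue **ctrl_vq)
{
	unsigned int total = ndata + 1;		/* data queues + one control queue */
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	const char **names;
	unsigned int i;
	int ret = -ENOMEM;

	vqs = kcalloc(total, sizeof(*vqs), GFP_KERNEL);
	callbacks = kcalloc(total, sizeof(*callbacks), GFP_KERNEL);
	names = kcalloc(total, sizeof(*names), GFP_KERNEL);
	if (!vqs || !callbacks || !names)
		goto out;

	for (i = 0; i < ndata; i++) {
		callbacks[i] = data_done;
		names[i] = "data";	/* real drivers generate per-queue names */
	}
	callbacks[total - 1] = ctrl_done;
	names[total - 1] = "control";

	ret = virtio_find_vqs(vdev, total, vqs, callbacks, names, NULL);
	if (ret)
		goto out;

	for (i = 0; i < ndata; i++)
		data_vqs[i] = vqs[i];
	*ctrl_vq = vqs[total - 1];	/* control queue was requested last */
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	return ret;
}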
|
/Linux-v5.4/arch/arm64/kvm/ |
D | guest.c |
    211  #define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq))) argument
    216  u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; in get_sve_vls() local
    224  memset(vqs, 0, sizeof(vqs)); in get_sve_vls()
    229  vqs[vq_word(vq)] |= vq_mask(vq); in get_sve_vls()
    231  if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs))) in get_sve_vls()
    240  u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; in set_sve_vls() local
    251  if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs))) in set_sve_vls()
    256  if (vq_present(vqs, vq)) in set_sve_vls()
    270  if (vq_present(vqs, vq) != sve_vq_available(vq)) in set_sve_vls()
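In arch/arm64/kvm/guest.c, vqs is not a virtqueue array at all: it is the SVE vector-length bitmap exchanged through the KVM_REG_ARM64_SVE_VLS register, one bit per vector quantum (VQ, a 128-bit granule) packed into u64 words. The vq_present() macro above tests one bit; the vq_word()/vq_mask() companions below are written out as an assumption about that layout (bit numbering relative to SVE_VQ_MIN), followed by a small worked example:

/* Worked example of the SVE vector-length bitmap (assumed layout: bit
 * (vq - SVE_VQ_MIN) of a u64 word array marks vector quantum vq). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SVE_VQ_MIN	1
#define SVE_VQ_MAX	512
#define SVE_VLS_WORDS	((SVE_VQ_MAX - SVE_VQ_MIN) / 64 + 1)

#define vq_word(vq)	(((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq)	((uint64_t)1 << (((vq) - SVE_VQ_MIN) % 64))
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

int main(void)
{
	uint64_t vqs[SVE_VLS_WORDS];
	unsigned int vq;

	memset(vqs, 0, sizeof(vqs));

	/* Advertise vector lengths 128, 256 and 512 bits (VQ = 1, 2, 4). */
	for (vq = 1; vq <= 4; vq <<= 1)
		vqs[vq_word(vq)] |= vq_mask(vq);

	for (vq = SVE_VQ_MIN; vq <= 8; vq++)
		printf("VQ %u (%u-bit vectors): %s\n", vq, vq * 128,
		       vq_present(vqs, vq) ? "present" : "absent");
	return 0;
}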
|
/Linux-v5.4/drivers/net/ethernet/intel/iavf/ |
D | iavf_virtchnl.c |
    301  struct virtchnl_queue_select vqs; in iavf_enable_queues() local
    310  vqs.vsi_id = adapter->vsi_res->vsi_id; in iavf_enable_queues()
    311  vqs.tx_queues = BIT(adapter->num_active_queues) - 1; in iavf_enable_queues()
    312  vqs.rx_queues = vqs.tx_queues; in iavf_enable_queues()
    315  (u8 *)&vqs, sizeof(vqs)); in iavf_enable_queues()
    326  struct virtchnl_queue_select vqs; in iavf_disable_queues() local
    335  vqs.vsi_id = adapter->vsi_res->vsi_id; in iavf_disable_queues()
    336  vqs.tx_queues = BIT(adapter->num_active_queues) - 1; in iavf_disable_queues()
    337  vqs.rx_queues = vqs.tx_queues; in iavf_disable_queues()
    340  (u8 *)&vqs, sizeof(vqs)); in iavf_disable_queues()
    [all …]
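In the iavf virtchnl code, vqs is a virtchnl_queue_select whose tx_queues/rx_queues fields are queue bitmaps: BIT(n) - 1 sets the low n bits and therefore selects queues 0 through n-1. A tiny worked example of that mask arithmetic, with BIT() expanded locally:

/* BIT(n) - 1 as a queue-selection mask: with 4 active queues the VF asks
 * the PF to act on queues 0..3. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned int num_active_queues = 4;		/* example value */
	uint32_t tx_queues = BIT(num_active_queues) - 1;	/* 0b1111 = 0xf */
	uint32_t rx_queues = tx_queues;			/* same set for RX */

	printf("tx_queues = 0x%" PRIx32 ", rx_queues = 0x%" PRIx32 "\n",
	       tx_queues, rx_queues);
	return 0;
}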
|
/Linux-v5.4/drivers/remoteproc/ |
D | remoteproc_virtio.c |
    131  list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in __rproc_virtio_del_vqs()
    144  struct virtqueue *vqs[], in rproc_virtio_find_vqs() argument
    154  vqs[i] = NULL; in rproc_virtio_find_vqs()
    158  vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], in rproc_virtio_find_vqs()
    160  if (IS_ERR(vqs[i])) { in rproc_virtio_find_vqs()
    161  ret = PTR_ERR(vqs[i]); in rproc_virtio_find_vqs()
|
/Linux-v5.4/include/linux/ |
D | virtio_config.h |
    80   struct virtqueue *vqs[], vq_callback_t *callbacks[],
    193  struct virtqueue *vqs[], vq_callback_t *callbacks[], in virtio_find_vqs() argument
    197  return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc); in virtio_find_vqs()
    202  struct virtqueue *vqs[], vq_callback_t *callbacks[], in virtio_find_vqs_ctx() argument
    206  return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, in virtio_find_vqs_ctx()
|
D | virtio.h |
    118  struct list_head vqs; member
    146  list_for_each_entry(vq, &vdev->vqs, list)
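struct virtio_device keeps every virtqueue created for it on the vqs list head shown at line 118, with each struct virtqueue linked in through its list member; line 146 is the walk over that list. A minimal sketch of such a walk, printing each queue's index and name:

/* Sketch: iterate every virtqueue set up for a virtio device. */
#include <linux/device.h>
#include <linux/virtio.h>

static void my_dump_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq;

	list_for_each_entry(vq, &vdev->vqs, list)
		dev_info(&vdev->dev, "vq %u: %s\n", vq->index, vq->name);
}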
|
/Linux-v5.4/drivers/gpu/drm/virtio/ |
D | virtgpu_kms.c |
    120  struct virtqueue *vqs[2]; in virtio_gpu_init() local
    163  ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); in virtio_gpu_init()
    168  vgdev->ctrlq.vq = vqs[0]; in virtio_gpu_init()
    169  vgdev->cursorq.vq = vqs[1]; in virtio_gpu_init()
|
/Linux-v5.4/drivers/net/ethernet/intel/ice/ |
D | ice_virtchnl_pf.c |
    1870  struct virtchnl_queue_select *vqs = in ice_vc_get_stats_msg() local
    1881  if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { in ice_vc_get_stats_msg()
    1913  struct virtchnl_queue_select *vqs = in ice_vc_ena_qs_msg() local
    1925  if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { in ice_vc_ena_qs_msg()
    1930  if (!vqs->rx_queues && !vqs->tx_queues) { in ice_vc_ena_qs_msg()
    1935  if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || in ice_vc_ena_qs_msg()
    1936  vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { in ice_vc_ena_qs_msg()
    1951  q_map = vqs->rx_queues; in ice_vc_ena_qs_msg()
    1953  if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { in ice_vc_ena_qs_msg()
    1975  q_map = vqs->tx_queues; in ice_vc_ena_qs_msg()
    [all …]
|
/Linux-v5.4/net/sched/ |
D | sch_gred.c |
    545  static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs) in gred_vqs_apply() argument
    550  nla_for_each_nested(attr, vqs, rem) { in gred_vqs_apply()
    604  struct nlattr *vqs, struct netlink_ext_ack *extack) in gred_vqs_validate() argument
    609  err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX, in gred_vqs_validate()
    614  nla_for_each_nested(attr, vqs, rem) { in gred_vqs_validate()
    760  struct nlattr *parms, *vqs, *opts = NULL; in gred_dump() local
    842  vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST); in gred_dump()
    843  if (!vqs) in gred_dump()
    891  nla_nest_end(skb, vqs); in gred_dump()
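In sch_gred the name belongs to netlink rather than virtio: TCA_GRED_VQ_LIST is a nested attribute carrying one nested entry per virtual queue, validated with nla_validate_nested_deprecated() and then walked with nla_for_each_nested(). A minimal sketch of the parsing side of that pattern; the policy, the attribute maximum and the per-entry handler are hypothetical stand-ins for the TCA_GRED_VQ_* definitions:

/* Sketch of parsing a nested per-VQ attribute list (hypothetical my_* names). */
#include <net/netlink.h>

#define MY_VQ_ENTRY_MAX 7
static const struct nla_policy my_vq_policy[MY_VQ_ENTRY_MAX + 1];
static void my_apply_one_vq(struct nlattr *entry);	/* assumed defined elsewhere */

static int my_parse_vq_list(struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	struct nlattr *attr;
	int rem, err;

	/* Validate the whole nested list before acting on any entry. */
	err = nla_validate_nested_deprecated(vqs, MY_VQ_ENTRY_MAX,
					     my_vq_policy, extack);
	if (err < 0)
		return err;

	/* Then visit each nested entry in turn. */
	nla_for_each_nested(attr, vqs, rem)
		my_apply_one_vq(attr);

	return 0;
}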
|
/Linux-v5.4/drivers/scsi/ |
D | virtio_scsi.c |
    783  struct virtqueue **vqs; in virtscsi_init() local
    787  vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL); in virtscsi_init()
    792  if (!callbacks || !vqs || !names) { in virtscsi_init()
    807  err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); in virtscsi_init()
    811  virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); in virtscsi_init()
    812  virtscsi_init_vq(&vscsi->event_vq, vqs[1]); in virtscsi_init()
    815  vqs[i]); in virtscsi_init()
    825  kfree(vqs); in virtscsi_init()
|
/Linux-v5.4/drivers/s390/virtio/ |
D | virtio_ccw.c |
    261  static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, in get_airq_indicator() argument
    289  (unsigned long)vqs[j]); in get_airq_indicator()
    505  list_for_each_entry_safe(vq, n, &vdev->vqs, list) in virtio_ccw_del_vqs()
    604  struct virtqueue *vqs[], int nvqs, in virtio_ccw_register_adapter_ind() argument
    618  thinint_area->indicator = get_airq_indicator(vqs, nvqs, in virtio_ccw_register_adapter_ind()
    653  struct virtqueue *vqs[], in virtio_ccw_find_vqs() argument
    670  vqs[i] = NULL; in virtio_ccw_find_vqs()
    674  vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], in virtio_ccw_find_vqs()
    677  if (IS_ERR(vqs[i])) { in virtio_ccw_find_vqs()
    678  ret = PTR_ERR(vqs[i]); in virtio_ccw_find_vqs()
    [all …]
|
/Linux-v5.4/drivers/misc/mic/vop/ |
D | vop_main.c |
    269  list_for_each_entry_safe(vq, n, &dev->vqs, list) in vop_del_vqs()
    384  struct virtqueue *vqs[], in vop_find_vqs() argument
    400  vqs[i] = NULL; in vop_find_vqs()
    406  vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], in vop_find_vqs()
    408  if (IS_ERR(vqs[i])) { in vop_find_vqs()
    409  err = PTR_ERR(vqs[i]); in vop_find_vqs()
    460  list_for_each_entry(vq, &vdev->vdev.vqs, list) in vop_virtio_intr_handler()
|
/Linux-v5.4/tools/virtio/ |
D | virtio_test.c |
    39   struct vq_info vqs[1]; member
    93   struct vq_info *info = &dev->vqs[dev->nvqs]; in vq_info_add()
    301  run_test(&dev, &dev.vqs[0], delayed, 0x100000); in main()
|