Lines Matching refs:vdpasim
49 struct vdpasim *vdpasim; member
58 struct vdpasim *vdpasim = mm_work->vdpasim; in vdpasim_mm_work_fn() local
63 vdpasim->mm_bound = mm_work->mm_to_bind; in vdpasim_mm_work_fn()
66 static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim, in vdpasim_worker_change_mm_sync() argument
72 kthread_queue_work(vdpasim->worker, work); in vdpasim_worker_change_mm_sync()
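The first groups of references sketch how a userspace mm gets bound to the simulator: the caller packs the target mm into a struct vdpasim_mm_work, queues it on the simulator's own kthread worker, and the work function stores it in vdpasim->mm_bound from worker context, so the bound mm is only ever touched by the worker. A minimal sketch of that pattern, assuming the struct layout implied by the members shown above and assuming the "_sync" suffix means the caller flushes the queued work before returning:

struct vdpasim_mm_work {
        struct kthread_work work;
        struct vdpasim *vdpasim;
        struct mm_struct *mm_to_bind;
};

static void vdpasim_mm_work_fn(struct kthread_work *work)
{
        struct vdpasim_mm_work *mm_work =
                container_of(work, struct vdpasim_mm_work, work);
        struct vdpasim *vdpasim = mm_work->vdpasim;

        /* Runs only on vdpasim->worker, so mm_bound needs no extra locking. */
        vdpasim->mm_bound = mm_work->mm_to_bind;
}

static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
                                          struct vdpasim_mm_work *mm_work)
{
        struct kthread_work *work = &mm_work->work;

        kthread_init_work(work, vdpasim_mm_work_fn);
        kthread_queue_work(vdpasim->worker, work);
        /* Assumed: wait for the worker to run it, matching the "_sync" name. */
        kthread_flush_work(work);
}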
77 static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa) in vdpa_to_sim()
79 return container_of(vdpa, struct vdpasim, vdpa); in vdpa_to_sim()
93 static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx) in vdpasim_queue_ready() argument
95 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_queue_ready()
104 if (use_va && vdpasim->mm_bound) { in vdpasim_queue_ready()
105 vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num, in vdpasim_queue_ready()
108 vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, in vdpasim_queue_ready()
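vdpasim_queue_ready() picks between two vringh backends: when the device operates on virtual addresses (use_va) and a userspace mm is currently bound, the ring is initialized with vringh_init_iotlb_va() so descriptor addresses are translated as user VAs; otherwise the plain IOTLB variant is used. A sketch of that branch; the weak-barriers flag, the desc_addr/driver_addr/device_addr field names and the trailing ring-pointer arguments are assumptions, since the listing truncates both calls:

static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        /* Ring addresses previously programmed through .set_vq_address. */
        struct vring_desc *desc = (struct vring_desc *)(uintptr_t)vq->desc_addr;
        struct vring_avail *avail = (struct vring_avail *)(uintptr_t)vq->driver_addr;
        struct vring_used *used = (struct vring_used *)(uintptr_t)vq->device_addr;

        if (use_va && vdpasim->mm_bound) {
                /* A userspace mm is bound: treat the ring addresses as user VAs. */
                vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
                                     true, desc, avail, used);
        } else {
                vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
                                  true, desc, avail, used);
        }
}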
127 static void vdpasim_vq_reset(struct vdpasim *vdpasim, in vdpasim_vq_reset() argument
136 vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features, in vdpasim_vq_reset()
142 static void vdpasim_do_reset(struct vdpasim *vdpasim) in vdpasim_do_reset() argument
146 spin_lock(&vdpasim->iommu_lock); in vdpasim_do_reset()
148 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_do_reset()
149 vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]); in vdpasim_do_reset()
150 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_do_reset()
151 &vdpasim->iommu_lock); in vdpasim_do_reset()
154 for (i = 0; i < vdpasim->dev_attr.nas; i++) { in vdpasim_do_reset()
155 vhost_iotlb_reset(&vdpasim->iommu[i]); in vdpasim_do_reset()
156 vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, in vdpasim_do_reset()
158 vdpasim->iommu_pt[i] = true; in vdpasim_do_reset()
161 vdpasim->running = true; in vdpasim_do_reset()
162 spin_unlock(&vdpasim->iommu_lock); in vdpasim_do_reset()
164 vdpasim->features = 0; in vdpasim_do_reset()
165 vdpasim->status = 0; in vdpasim_do_reset()
166 ++vdpasim->generation; in vdpasim_do_reset()
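Read together, vdpasim_do_reset() restores every virtqueue, points it back at address space 0, rebuilds every IOTLB as a 1:1 passthrough mapping of the full range, and clears the device-level state (features, status) while bumping the config generation. The listing truncates the vhost_iotlb_add_range() call; the completed per-address-space loop plausibly reads as below, where the host-address argument 0 and the VHOST_MAP_RW permission are filled in as assumptions:

        for (i = 0; i < vdpasim->dev_attr.nas; i++) {
                vhost_iotlb_reset(&vdpasim->iommu[i]);
                /* Identity-map the whole IOVA range; tail arguments assumed. */
                vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
                                      0, VHOST_MAP_RW);
                vdpasim->iommu_pt[i] = true;
        }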
174 struct vdpasim *vdpasim = container_of(work, struct vdpasim, work); in vdpasim_work_fn() local
175 struct mm_struct *mm = vdpasim->mm_bound; in vdpasim_work_fn()
183 vdpasim->dev_attr.work_fn(vdpasim); in vdpasim_work_fn()
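vdpasim_work_fn() is the single routine the worker executes for the datapath: it resolves the bound mm and then hands control to the device-type handler through dev_attr.work_fn (the net and block simulators plug in there). The references omit what sits between those two lines; a plausible sketch, assuming the usual kthread_use_mm()/kthread_unuse_mm() bracketing so the handler can dereference user VAs while an mm is bound:

static void vdpasim_work_fn(struct kthread_work *work)
{
        struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
        struct mm_struct *mm = vdpasim->mm_bound;

        if (mm) {
                /* Assumed: bail out if the owning process is already exiting. */
                if (!mmget_not_zero(mm))
                        return;
                kthread_use_mm(mm);
        }

        vdpasim->dev_attr.work_fn(vdpasim);

        if (mm) {
                kthread_unuse_mm(mm);
                mmput(mm);
        }
}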
191 struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr, in vdpasim_create()
196 struct vdpasim *vdpasim; in vdpasim_create() local
225 vdpasim = vdpa_to_sim(vdpa); in vdpasim_create()
226 vdpasim->dev_attr = *dev_attr; in vdpasim_create()
227 dev = &vdpasim->vdpa.dev; in vdpasim_create()
229 kthread_init_work(&vdpasim->work, vdpasim_work_fn); in vdpasim_create()
230 vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s", in vdpasim_create()
232 if (IS_ERR(vdpasim->worker)) in vdpasim_create()
235 mutex_init(&vdpasim->mutex); in vdpasim_create()
236 spin_lock_init(&vdpasim->iommu_lock); in vdpasim_create()
241 vdpasim->vdpa.mdev = dev_attr->mgmt_dev; in vdpasim_create()
243 vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL); in vdpasim_create()
244 if (!vdpasim->config) in vdpasim_create()
247 vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue), in vdpasim_create()
249 if (!vdpasim->vqs) in vdpasim_create()
252 vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas, in vdpasim_create()
253 sizeof(*vdpasim->iommu), GFP_KERNEL); in vdpasim_create()
254 if (!vdpasim->iommu) in vdpasim_create()
257 vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas, in vdpasim_create()
258 sizeof(*vdpasim->iommu_pt), GFP_KERNEL); in vdpasim_create()
259 if (!vdpasim->iommu_pt) in vdpasim_create()
262 for (i = 0; i < vdpasim->dev_attr.nas; i++) in vdpasim_create()
263 vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0); in vdpasim_create()
266 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_create()
267 &vdpasim->iommu_lock); in vdpasim_create()
269 vdpasim->vdpa.dma_dev = dev; in vdpasim_create()
271 return vdpasim; in vdpasim_create()
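vdpasim_create() allocates the worker, the config cache, the virtqueue array and the per-address-space IOTLB state in turn, checking each step. The listing does not show the error path; a sketch of the unwind it suggests, where the err_ret label, the put_device() call and the -ENOMEM value are assumptions (the usual vdpa pattern is that dropping the last device reference lets the ->free callback, vdpasim_free below, release whatever was already set up):

        vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
                                          sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
        if (!vdpasim->iommu_pt)
                goto err_ret;                   /* assumed label */

        /* ... remaining setup shown in the listing ... */

        return vdpasim;

err_ret:
        /* Assumed unwind: the release path frees the partially built device. */
        put_device(dev);
        return ERR_PTR(-ENOMEM);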
280 void vdpasim_schedule_work(struct vdpasim *vdpasim) in vdpasim_schedule_work() argument
282 kthread_queue_work(vdpasim->worker, &vdpasim->work); in vdpasim_schedule_work()
290 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_address() local
291 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_address()
302 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_num() local
303 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_num()
310 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_kick_vq() local
311 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_kick_vq()
313 if (!vdpasim->running && in vdpasim_kick_vq()
314 (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) { in vdpasim_kick_vq()
315 vdpasim->pending_kick = true; in vdpasim_kick_vq()
320 vdpasim_schedule_work(vdpasim); in vdpasim_kick_vq()
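The kick handler shows how kicks interact with suspend: if the driver has already set DRIVER_OK but the simulator is not running (it has been suspended), the kick is recorded in pending_kick rather than dropped, so resume can replay it; otherwise the datapath work is scheduled. A sketch of that flow; the vq->ready guard before scheduling is an assumption:

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        if (!vdpasim->running &&
            (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* Suspended: remember the kick instead of processing it now. */
                vdpasim->pending_kick = true;
                return;
        }

        if (vq->ready)
                vdpasim_schedule_work(vdpasim);
}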
326 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_cb() local
327 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_cb()
335 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_ready() local
336 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_ready()
339 mutex_lock(&vdpasim->mutex); in vdpasim_set_vq_ready()
343 vdpasim_queue_ready(vdpasim, idx); in vdpasim_set_vq_ready()
345 mutex_unlock(&vdpasim->mutex); in vdpasim_set_vq_ready()
350 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_vq_ready() local
351 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_ready()
359 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_state() local
360 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_state()
363 mutex_lock(&vdpasim->mutex); in vdpasim_set_vq_state()
365 mutex_unlock(&vdpasim->mutex); in vdpasim_set_vq_state()
373 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_vq_state() local
374 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_state()
385 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_vq_stats() local
387 if (vdpasim->dev_attr.get_stats) in vdpasim_get_vq_stats()
388 return vdpasim->dev_attr.get_stats(vdpasim, idx, in vdpasim_get_vq_stats()
409 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_device_features() local
411 return vdpasim->dev_attr.supported_features; in vdpasim_get_device_features()
421 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_driver_features() local
427 vdpasim->features = features & vdpasim->dev_attr.supported_features; in vdpasim_set_driver_features()
434 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_driver_features() local
436 return vdpasim->features; in vdpasim_get_driver_features()
452 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_device_id() local
454 return vdpasim->dev_attr.id; in vdpasim_get_device_id()
464 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_status() local
467 mutex_lock(&vdpasim->mutex); in vdpasim_get_status()
468 status = vdpasim->status; in vdpasim_get_status()
469 mutex_unlock(&vdpasim->mutex); in vdpasim_get_status()
476 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_status() local
478 mutex_lock(&vdpasim->mutex); in vdpasim_set_status()
479 vdpasim->status = status; in vdpasim_set_status()
480 mutex_unlock(&vdpasim->mutex); in vdpasim_set_status()
485 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_reset() local
487 mutex_lock(&vdpasim->mutex); in vdpasim_reset()
488 vdpasim->status = 0; in vdpasim_reset()
489 vdpasim_do_reset(vdpasim); in vdpasim_reset()
490 mutex_unlock(&vdpasim->mutex); in vdpasim_reset()
497 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_suspend() local
499 mutex_lock(&vdpasim->mutex); in vdpasim_suspend()
500 vdpasim->running = false; in vdpasim_suspend()
501 mutex_unlock(&vdpasim->mutex); in vdpasim_suspend()
508 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_resume() local
511 mutex_lock(&vdpasim->mutex); in vdpasim_resume()
512 vdpasim->running = true; in vdpasim_resume()
514 if (vdpasim->pending_kick) { in vdpasim_resume()
516 for (i = 0; i < vdpasim->dev_attr.nvqs; ++i) in vdpasim_resume()
519 vdpasim->pending_kick = false; in vdpasim_resume()
522 mutex_unlock(&vdpasim->mutex); in vdpasim_resume()
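Resume is the counterpart of the deferred kick above: after flipping running back to true under the device mutex, any kick swallowed while suspended is replayed by kicking every queue once before pending_kick is cleared. A sketch of the resume side; the per-queue replay call in the loop body is an assumption:

static int vdpasim_resume(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int i;

        mutex_lock(&vdpasim->mutex);
        vdpasim->running = true;

        if (vdpasim->pending_kick) {
                /* Re-deliver the kicks that arrived while suspended. */
                for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
                        vdpasim_kick_vq(vdpa, i);

                vdpasim->pending_kick = false;
        }

        mutex_unlock(&vdpasim->mutex);

        return 0;
}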
529 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_config_size() local
531 return vdpasim->dev_attr.config_size; in vdpasim_get_config_size()
537 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_config() local
539 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_get_config()
542 if (vdpasim->dev_attr.get_config) in vdpasim_get_config()
543 vdpasim->dev_attr.get_config(vdpasim, vdpasim->config); in vdpasim_get_config()
545 memcpy(buf, vdpasim->config + offset, len); in vdpasim_get_config()
551 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_config() local
553 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_set_config()
556 memcpy(vdpasim->config + offset, buf, len); in vdpasim_set_config()
558 if (vdpasim->dev_attr.set_config) in vdpasim_set_config()
559 vdpasim->dev_attr.set_config(vdpasim, vdpasim->config); in vdpasim_set_config()
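Both config-space accessors follow the same shape: reject accesses that run past dev_attr.config_size, let the device type refresh or consume its private layout through the optional get_config/set_config hooks, and copy between the caller's buffer and the cached vdpasim->config. A sketch of the read side; the early return on an out-of-range access is an assumption, as the listing only shows the bounds check itself:

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                               void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* Ignore reads that run past the device's config space. */
        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        /* Let the device type (net, blk, ...) refresh its config first. */
        if (vdpasim->dev_attr.get_config)
                vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

        memcpy(buf, vdpasim->config + offset, len);
}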
564 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_generation() local
566 return vdpasim->generation; in vdpasim_get_generation()
582 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_group_asid() local
586 if (group > vdpasim->dev_attr.ngroups) in vdpasim_set_group_asid()
589 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_group_asid()
592 iommu = &vdpasim->iommu[asid]; in vdpasim_set_group_asid()
594 mutex_lock(&vdpasim->mutex); in vdpasim_set_group_asid()
596 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) in vdpasim_set_group_asid()
598 vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu, in vdpasim_set_group_asid()
599 &vdpasim->iommu_lock); in vdpasim_set_group_asid()
601 mutex_unlock(&vdpasim->mutex); in vdpasim_set_group_asid()
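Switching a group's address space means validating group and asid against the limits in dev_attr and then re-pointing the vringh IOTLB of the affected virtqueues at iommu[asid] under the device mutex. A sketch; the per-queue group lookup used to pick which rings are switched is an assumption (named vdpasim_get_vq_group here for illustration):

static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
                                  unsigned int asid)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb *iommu;
        int i;

        if (group > vdpasim->dev_attr.ngroups)
                return -EINVAL;
        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        iommu = &vdpasim->iommu[asid];

        mutex_lock(&vdpasim->mutex);
        for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                if (vdpasim_get_vq_group(vdpa, i) == group)     /* assumed helper */
                        vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
                                         &vdpasim->iommu_lock);
        mutex_unlock(&vdpasim->mutex);

        return 0;
}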
609 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_map() local
615 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_map()
618 spin_lock(&vdpasim->iommu_lock); in vdpasim_set_map()
620 iommu = &vdpasim->iommu[asid]; in vdpasim_set_map()
622 vdpasim->iommu_pt[asid] = false; in vdpasim_set_map()
631 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
636 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
642 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_bind_mm() local
645 mm_work.vdpasim = vdpasim; in vdpasim_bind_mm()
648 vdpasim_worker_change_mm_sync(vdpasim, &mm_work); in vdpasim_bind_mm()
655 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_unbind_mm() local
658 mm_work.vdpasim = vdpasim; in vdpasim_unbind_mm()
661 vdpasim_worker_change_mm_sync(vdpasim, &mm_work); in vdpasim_unbind_mm()
668 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_dma_map() local
671 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_map()
674 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_map()
675 if (vdpasim->iommu_pt[asid]) { in vdpasim_dma_map()
676 vhost_iotlb_reset(&vdpasim->iommu[asid]); in vdpasim_dma_map()
677 vdpasim->iommu_pt[asid] = false; in vdpasim_dma_map()
679 ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova, in vdpasim_dma_map()
681 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_map()
689 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_dma_unmap() local
691 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_unmap()
694 if (vdpasim->iommu_pt[asid]) { in vdpasim_dma_unmap()
695 vhost_iotlb_reset(&vdpasim->iommu[asid]); in vdpasim_dma_unmap()
696 vdpasim->iommu_pt[asid] = false; in vdpasim_dma_unmap()
699 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
700 vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1); in vdpasim_dma_unmap()
701 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
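The DMA map/unmap callbacks undo the passthrough state installed by vdpasim_do_reset(): the first explicit mapping into an address space clears iommu_pt[asid] and throws away the 1:1 range, after which individual ranges are added or removed under iommu_lock. A sketch of the map side; the pa/perm/opaque parameters and their forwarding into vhost_iotlb_add_range_ctx() are assumptions based on the op's usual prototype:

static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
                           u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);
        if (vdpasim->iommu_pt[asid]) {
                /* Leaving passthrough: drop the 1:1 mapping installed at reset. */
                vhost_iotlb_reset(&vdpasim->iommu[asid]);
                vdpasim->iommu_pt[asid] = false;
        }
        ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
                                        iova + size - 1, pa, perm, opaque);
        spin_unlock(&vdpasim->iommu_lock);

        return ret;
}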
708 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_free() local
711 kthread_cancel_work_sync(&vdpasim->work); in vdpasim_free()
712 kthread_destroy_worker(vdpasim->worker); in vdpasim_free()
714 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_free()
715 vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov); in vdpasim_free()
716 vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); in vdpasim_free()
719 vdpasim->dev_attr.free(vdpasim); in vdpasim_free()
721 for (i = 0; i < vdpasim->dev_attr.nas; i++) in vdpasim_free()
722 vhost_iotlb_reset(&vdpasim->iommu[i]); in vdpasim_free()
723 kfree(vdpasim->iommu); in vdpasim_free()
724 kfree(vdpasim->iommu_pt); in vdpasim_free()
725 kfree(vdpasim->vqs); in vdpasim_free()
726 kfree(vdpasim->config); in vdpasim_free()