Lines Matching +full:iommu +full:-map

1 // SPDX-License-Identifier: GPL-2.0-only
16 #include <linux/dma-map-ops.h>
32 MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");
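
A quick sketch of how this knob is most plausibly consumed in vdpasim_create(): the two ops-table names match callbacks that appear later in this listing (.set_map vs. .dma_map/.dma_unmap), but treat the branch itself as an illustration rather than a verbatim excerpt.

        /* Sketch: batched mapping exposes a single .set_map callback; the
         * non-batched path registers incremental .dma_map/.dma_unmap ops.
         */
        const struct vdpa_config_ops *ops;

        if (batch_mapping)
                ops = &vdpasim_batch_config_ops;
        else
                ops = &vdpasim_config_ops;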
60 if (!vq->cb) in vdpasim_vq_notify()
63 vq->cb(vq->private); in vdpasim_vq_notify()
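
The two matched lines above belong to a short vringh notify callback; a plausible reconstruction follows. The container_of() step is an assumption based on the vringh API, which hands the callback a struct vringh *.

        static void vdpasim_vq_notify(struct vringh *vring)
        {
                /* Recover the enclosing virtqueue from its embedded vringh. */
                struct vdpasim_virtqueue *vq =
                        container_of(vring, struct vdpasim_virtqueue, vring);

                if (!vq->cb)
                        return;

                vq->cb(vq->private);
        }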
68 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_queue_ready()
70 vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features, in vdpasim_queue_ready()
72 (struct vring_desc *)(uintptr_t)vq->desc_addr, in vdpasim_queue_ready()
74 (uintptr_t)vq->driver_addr, in vdpasim_queue_ready()
76 (uintptr_t)vq->device_addr); in vdpasim_queue_ready()
78 vq->vring.notify = vdpasim_vq_notify; in vdpasim_queue_ready()
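
The vringh_init_iotlb() call at lines 70-76 is split across matched lines, with two arguments elided by the search. Filled in from the vringh_init_iotlb() prototype (queue depth and the weak_barriers flag; VDPASIM_QUEUE_MAX is assumed to be the simulator's depth limit), the whole call plausibly reads:

        /* Bind the driver-supplied ring addresses to an IOTLB-backed vringh;
         * the addresses are guest IOVAs, hence the uintptr_t casts.
         */
        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false,
                          (struct vring_desc *)(uintptr_t)vq->desc_addr,
                          (struct vring_avail *)(uintptr_t)vq->driver_addr,
                          (struct vring_used *)(uintptr_t)vq->device_addr);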
84 vq->ready = false; in vdpasim_vq_reset()
85 vq->desc_addr = 0; in vdpasim_vq_reset()
86 vq->driver_addr = 0; in vdpasim_vq_reset()
87 vq->device_addr = 0; in vdpasim_vq_reset()
88 vq->cb = NULL; in vdpasim_vq_reset()
89 vq->private = NULL; in vdpasim_vq_reset()
90 vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features, in vdpasim_vq_reset()
93 vq->vring.notify = NULL; in vdpasim_vq_reset()
100 spin_lock(&vdpasim->iommu_lock); in vdpasim_do_reset()
102 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_do_reset()
103 vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]); in vdpasim_do_reset()
104 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_do_reset()
105 &vdpasim->iommu_lock); in vdpasim_do_reset()
108 for (i = 0; i < vdpasim->dev_attr.nas; i++) in vdpasim_do_reset()
109 vhost_iotlb_reset(&vdpasim->iommu[i]); in vdpasim_do_reset()
111 vdpasim->running = true; in vdpasim_do_reset()
112 spin_unlock(&vdpasim->iommu_lock); in vdpasim_do_reset()
114 vdpasim->features = 0; in vdpasim_do_reset()
115 vdpasim->status = 0; in vdpasim_do_reset()
116 ++vdpasim->generation; in vdpasim_do_reset()
121 int perm = -EFAULT; in dir_to_perm()
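
dir_to_perm() starts from -EFAULT so that an unrecognized direction maps nothing. A sketch of the likely switch body, assuming the usual vhost_iotlb permission flags (perm is from the device's point of view):

        static int dir_to_perm(enum dma_data_direction dir)
        {
                int perm = -EFAULT;

                switch (dir) {
                case DMA_FROM_DEVICE:           /* device writes the buffer */
                        perm = VHOST_MAP_WO;
                        break;
                case DMA_TO_DEVICE:             /* device reads the buffer */
                        perm = VHOST_MAP_RO;
                        break;
                case DMA_BIDIRECTIONAL:
                        perm = VHOST_MAP_RW;
                        break;
                default:
                        break;
                }

                return perm;
        }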
147 /* We set the limit_pfn to the maximum (ULONG_MAX - 1) */ in vdpasim_map_range()
148 iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova), in vdpasim_map_range()
149 ULONG_MAX - 1, true); in vdpasim_map_range()
153 dma_addr = iova_dma_addr(&vdpasim->iova, iova); in vdpasim_map_range()
155 spin_lock(&vdpasim->iommu_lock); in vdpasim_map_range()
156 ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr, in vdpasim_map_range()
157 (u64)dma_addr + size - 1, (u64)paddr, perm); in vdpasim_map_range()
158 spin_unlock(&vdpasim->iommu_lock); in vdpasim_map_range()
161 __free_iova(&vdpasim->iova, iova); in vdpasim_map_range()
171 spin_lock(&vdpasim->iommu_lock); in vdpasim_unmap_range()
172 vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr, in vdpasim_unmap_range()
173 (u64)dma_addr + size - 1); in vdpasim_unmap_range()
174 spin_unlock(&vdpasim->iommu_lock); in vdpasim_unmap_range()
176 free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr)); in vdpasim_unmap_range()
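
vdpasim_map_range()/vdpasim_unmap_range() back the simulator's dma_map_ops: an IOVA is carved out of the domain, the IOVA-to-physical translation is recorded in iommu[0], and unmap deletes the range and returns the IOVA. A hedged sketch of a page-mapping wrapper combining them with dir_to_perm(); the dev_to_sim() helper name is an assumption:

        static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
                                           unsigned long offset, size_t size,
                                           enum dma_data_direction dir,
                                           unsigned long attrs)
        {
                struct vdpasim *vdpasim = dev_to_sim(dev);      /* assumed helper */
                phys_addr_t paddr = page_to_phys(page) + offset;
                int perm = dir_to_perm(dir);

                if (perm < 0)
                        return DMA_MAPPING_ERROR;

                /* Allocate an IOVA and record the translation in iommu[0]. */
                return vdpasim_map_range(vdpasim, paddr, size, perm);
        }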
255 int i, ret = -ENOMEM; in vdpasim_create()
257 if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) { in vdpasim_create()
258 if (config->device_features & in vdpasim_create()
259 ~dev_attr->supported_features) in vdpasim_create()
260 return ERR_PTR(-EINVAL); in vdpasim_create()
261 dev_attr->supported_features = in vdpasim_create()
262 config->device_features; in vdpasim_create()
271 dev_attr->ngroups, dev_attr->nas, in vdpasim_create()
272 dev_attr->name, false); in vdpasim_create()
278 vdpasim->dev_attr = *dev_attr; in vdpasim_create()
279 INIT_WORK(&vdpasim->work, dev_attr->work_fn); in vdpasim_create()
280 spin_lock_init(&vdpasim->lock); in vdpasim_create()
281 spin_lock_init(&vdpasim->iommu_lock); in vdpasim_create()
283 dev = &vdpasim->vdpa.dev; in vdpasim_create()
284 dev->dma_mask = &dev->coherent_dma_mask; in vdpasim_create()
288 vdpasim->vdpa.mdev = dev_attr->mgmt_dev; in vdpasim_create()
290 vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL); in vdpasim_create()
291 if (!vdpasim->config) in vdpasim_create()
294 vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue), in vdpasim_create()
296 if (!vdpasim->vqs) in vdpasim_create()
299 vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas, in vdpasim_create()
300 sizeof(*vdpasim->iommu), GFP_KERNEL); in vdpasim_create()
301 if (!vdpasim->iommu) in vdpasim_create()
304 for (i = 0; i < vdpasim->dev_attr.nas; i++) in vdpasim_create()
305 vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0); in vdpasim_create()
307 vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL); in vdpasim_create()
308 if (!vdpasim->buffer) in vdpasim_create()
311 for (i = 0; i < dev_attr->nvqs; i++) in vdpasim_create()
312 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_create()
313 &vdpasim->iommu_lock); in vdpasim_create()
320 init_iova_domain(&vdpasim->iova, 1, 0); in vdpasim_create()
322 vdpasim->vdpa.dma_dev = dev; in vdpasim_create()
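
vdpasim_create() is driven entirely by the vdpasim_dev_attr a device flavour fills in; every field below appears elsewhere in this listing, but the concrete values and the two helper names are placeholders modelled on a network simulator:

        /* Hypothetical caller sketch: describe the device, then hand off
         * to the common vdpasim_create(). All constants are illustrative.
         */
        struct vdpasim_dev_attr dev_attr = {};
        struct vdpasim *simdev;

        dev_attr.mgmt_dev           = mdev;
        dev_attr.name               = name;
        dev_attr.id                 = VIRTIO_ID_NET;
        dev_attr.supported_features = VDPASIM_NET_FEATURES;    /* placeholder */
        dev_attr.nvqs               = 3;        /* rx, tx, ctrl */
        dev_attr.ngroups            = 2;
        dev_attr.nas                = 2;
        dev_attr.config_size        = sizeof(struct virtio_net_config);
        dev_attr.work_fn            = vdpasim_net_work;        /* placeholder */
        dev_attr.buffer_size        = PAGE_SIZE;

        simdev = vdpasim_create(&dev_attr, config);
        if (IS_ERR(simdev))
                return PTR_ERR(simdev);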
338 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_address()
340 vq->desc_addr = desc_area; in vdpasim_set_vq_address()
341 vq->driver_addr = driver_area; in vdpasim_set_vq_address()
342 vq->device_addr = device_area; in vdpasim_set_vq_address()
350 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_num()
352 vq->num = num; in vdpasim_set_vq_num()
358 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_kick_vq()
360 if (vq->ready) in vdpasim_kick_vq()
361 schedule_work(&vdpasim->work); in vdpasim_kick_vq()
368 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_cb()
370 vq->cb = cb->callback; in vdpasim_set_vq_cb()
371 vq->private = cb->private; in vdpasim_set_vq_cb()
377 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_ready()
380 spin_lock(&vdpasim->lock); in vdpasim_set_vq_ready()
381 old_ready = vq->ready; in vdpasim_set_vq_ready()
382 vq->ready = ready; in vdpasim_set_vq_ready()
383 if (vq->ready && !old_ready) { in vdpasim_set_vq_ready()
386 spin_unlock(&vdpasim->lock); in vdpasim_set_vq_ready()
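
The search skipped lines 384-385, the body of the if at line 383; on a false-to-true transition the natural missing step is rebuilding the ring via vdpasim_queue_ready() (shown near the top of this listing). A hedged reconstruction:

        if (vq->ready && !old_ready) {
                /* Assumed elided body: the driver has published the ring
                 * addresses, so (re)initialize the IOTLB-backed vringh.
                 */
                vdpasim_queue_ready(vdpasim, idx);
        }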
392 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_ready()
394 return vq->ready; in vdpasim_get_vq_ready()
401 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_state()
402 struct vringh *vrh = &vq->vring; in vdpasim_set_vq_state()
404 spin_lock(&vdpasim->lock); in vdpasim_set_vq_state()
405 vrh->last_avail_idx = state->split.avail_index; in vdpasim_set_vq_state()
406 spin_unlock(&vdpasim->lock); in vdpasim_set_vq_state()
415 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_state()
416 struct vringh *vrh = &vq->vring; in vdpasim_get_vq_state()
418 state->split.avail_index = vrh->last_avail_idx; in vdpasim_get_vq_state()
440 return vdpasim->dev_attr.supported_features; in vdpasim_get_device_features()
449 return -EINVAL; in vdpasim_set_driver_features()
451 vdpasim->features = features & vdpasim->dev_attr.supported_features; in vdpasim_set_driver_features()
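
The -EINVAL at line 449 guards a check the search elided. Since every device access in this simulator is translated through a vhost IOTLB, VIRTIO_F_ACCESS_PLATFORM is presumably non-negotiable; a sketch of the assumed guard:

        /* Assumed elided check: device DMA must be translated, so refuse
         * feature sets that drop VIRTIO_F_ACCESS_PLATFORM.
         */
        if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;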
460 return vdpasim->features; in vdpasim_get_driver_features()
478 return vdpasim->dev_attr.id; in vdpasim_get_device_id()
491 spin_lock(&vdpasim->lock); in vdpasim_get_status()
492 status = vdpasim->status; in vdpasim_get_status()
493 spin_unlock(&vdpasim->lock); in vdpasim_get_status()
502 spin_lock(&vdpasim->lock); in vdpasim_set_status()
503 vdpasim->status = status; in vdpasim_set_status()
504 spin_unlock(&vdpasim->lock); in vdpasim_set_status()
511 spin_lock(&vdpasim->lock); in vdpasim_reset()
512 vdpasim->status = 0; in vdpasim_reset()
514 spin_unlock(&vdpasim->lock); in vdpasim_reset()
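
Between clearing the status (line 512) and dropping the lock (line 514), the elided line 513 is in all likelihood the call into vdpasim_do_reset() shown earlier. A sketch of the whole callback, assuming the file's usual vdpa_to_sim() container_of helper:

        static int vdpasim_reset(struct vdpa_device *vdpa)
        {
                struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

                spin_lock(&vdpasim->lock);
                vdpasim->status = 0;
                vdpasim_do_reset(vdpasim);      /* assumed elided line 513 */
                spin_unlock(&vdpasim->lock);

                return 0;
        }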
523 spin_lock(&vdpasim->lock); in vdpasim_suspend()
524 vdpasim->running = false; in vdpasim_suspend()
525 spin_unlock(&vdpasim->lock); in vdpasim_suspend()
534 return vdpasim->dev_attr.config_size; in vdpasim_get_config_size()
542 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_get_config()
545 if (vdpasim->dev_attr.get_config) in vdpasim_get_config()
546 vdpasim->dev_attr.get_config(vdpasim, vdpasim->config); in vdpasim_get_config()
548 memcpy(buf, vdpasim->config + offset, len); in vdpasim_get_config()
556 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_set_config()
559 memcpy(vdpasim->config + offset, buf, len); in vdpasim_set_config()
561 if (vdpasim->dev_attr.set_config) in vdpasim_set_config()
562 vdpasim->dev_attr.set_config(vdpasim, vdpasim->config); in vdpasim_set_config()
569 return vdpasim->generation; in vdpasim_get_generation()
586 struct vhost_iotlb *iommu; in vdpasim_set_group_asid() local
589 if (group > vdpasim->dev_attr.ngroups) in vdpasim_set_group_asid()
590 return -EINVAL; in vdpasim_set_group_asid()
592 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_group_asid()
593 return -EINVAL; in vdpasim_set_group_asid()
595 iommu = &vdpasim->iommu[asid]; in vdpasim_set_group_asid()
597 spin_lock(&vdpasim->lock); in vdpasim_set_group_asid()
599 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) in vdpasim_set_group_asid()
601 vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu, in vdpasim_set_group_asid()
602 &vdpasim->iommu_lock); in vdpasim_set_group_asid()
604 spin_unlock(&vdpasim->lock); in vdpasim_set_group_asid()
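
The loop at lines 599-602 must only rebind the vrings whose group matches, so the skipped line 600 is presumably the group test. A reconstructed sketch; vdpasim_get_vq_group() is assumed to be the per-vq group lookup this driver registers as a vdpa op:

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                if (vdpasim_get_vq_group(&vdpasim->vdpa, i) == group)
                        vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
                                         &vdpasim->iommu_lock);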
613 struct vhost_iotlb_map *map; in vdpasim_set_map() local
614 struct vhost_iotlb *iommu; in vdpasim_set_map() local
615 u64 start = 0ULL, last = 0ULL - 1; in vdpasim_set_map()
618 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_map()
619 return -EINVAL; in vdpasim_set_map()
621 spin_lock(&vdpasim->iommu_lock); in vdpasim_set_map()
623 iommu = &vdpasim->iommu[asid]; in vdpasim_set_map()
624 vhost_iotlb_reset(iommu); in vdpasim_set_map()
626 for (map = vhost_iotlb_itree_first(iotlb, start, last); map; in vdpasim_set_map()
627 map = vhost_iotlb_itree_next(map, start, last)) { in vdpasim_set_map()
628 ret = vhost_iotlb_add_range(iommu, map->start, in vdpasim_set_map()
629 map->last, map->addr, map->perm); in vdpasim_set_map()
633 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
637 vhost_iotlb_reset(iommu); in vdpasim_set_map()
638 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
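
vdpasim_set_map() replaces the ASID's whole IOTLB with a copy of the caller's: start/last span the full 64-bit range (0ULL - 1 is U64_MAX), each entry is re-added under iommu_lock, and on failure the half-built copy is reset (lines 637-638). A hypothetical test-harness-style sketch of feeding it a table (real callers go through the vdpa .set_map op; all values are placeholders):

        u64 iova = 0x100000, size = 0x10000, paddr = 0x40000000; /* placeholders */
        struct vhost_iotlb *tlb = vhost_iotlb_alloc(2048, 0);
        int ret;

        if (!tlb)
                return -ENOMEM;

        vhost_iotlb_add_range(tlb, iova, iova + size - 1, paddr, VHOST_MAP_RW);
        ret = vdpasim_set_map(&vdpasim->vdpa, 0, tlb);  /* normally via ops->set_map */
        vhost_iotlb_free(tlb);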
649 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_map()
650 return -EINVAL; in vdpasim_dma_map()
652 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_map()
653 ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova, in vdpasim_dma_map()
654 iova + size - 1, pa, perm, opaque); in vdpasim_dma_map()
655 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_map()
665 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_unmap()
666 return -EINVAL; in vdpasim_dma_unmap()
668 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
669 vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1); in vdpasim_dma_unmap()
670 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
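
vdpasim_dma_map()/vdpasim_dma_unmap() are the incremental alternative to the batched .set_map above, which circles back to the batch_mapping parameter at the top of this listing. A partial sketch of how the two ops tables likely differ (table names from this file, shared members abbreviated):

        static const struct vdpa_config_ops vdpasim_config_ops = {
                /* ... common ops ... */
                .dma_map        = vdpasim_dma_map,
                .dma_unmap      = vdpasim_dma_unmap,
        };

        static const struct vdpa_config_ops vdpasim_batch_config_ops = {
                /* ... common ops ... */
                .set_map        = vdpasim_set_map,
        };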
680 cancel_work_sync(&vdpasim->work); in vdpasim_free()
682 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_free()
683 vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov); in vdpasim_free()
684 vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); in vdpasim_free()
688 put_iova_domain(&vdpasim->iova); in vdpasim_free()
692 kvfree(vdpasim->buffer); in vdpasim_free()
693 vhost_iotlb_free(vdpasim->iommu); in vdpasim_free()
694 kfree(vdpasim->vqs); in vdpasim_free()
695 kfree(vdpasim->config); in vdpasim_free()