Lines Matching +full:iommu +full:-map in drivers/vdpa/vdpa_sim/vdpa_sim.c (vDPA device simulator)

1 // SPDX-License-Identifier: GPL-2.0-only
20 #include <linux/iommu.h>
21 #include <linux/dma-map-ops.h>
39 MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");
63 #define VDPASIM_NAME "vdpasim-netdev"
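
The parameter documented at line 39 is declared just above it; the declaration does not match the query, but the usual pairing presumably reads:

	/* Assumed context for line 39 (the default value is an assumption). */
	static int batch_mapping = 1;
	module_param(batch_mapping, int, 0444);

When enabled, the simulator advertises the batched set_map() interface rather than per-range dma_map()/dma_unmap(); both appear near the end of this file.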

struct vdpasim members:
78 	struct vhost_iotlb *iommu;
83 	/* spinlock to synchronize iommu table */
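
For orientation, a sketch of the enclosing struct vdpasim, reconstructed from the fields used throughout this listing; the exact member set and ordering are assumptions:

	struct vdpasim {
		struct vdpa_device vdpa;		/* embedded device, see line 368 */
		struct vdpasim_virtqueue vqs[2];	/* vqs[0] = rx, vqs[1] = tx */
		struct work_struct work;		/* runs vdpasim_work() */
		spinlock_t lock;			/* guards vq and status state */
		struct virtio_net_config config;
		struct vhost_iotlb *iommu;		/* line 78: the simulated IOMMU */
		void *buffer;				/* PAGE_SIZE loopback bounce buffer */
		u32 status;
		u32 generation;
		u64 features;
		/* spinlock to synchronize iommu table */
		spinlock_t iommu_lock;			/* follows the comment at line 83 */
	};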

87 /* TODO: cross-endian support */

vdpasim_is_little_endian():
91 		(vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
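
Line 91 is the tail of the endianness helper: a virtio device is little-endian if VIRTIO_F_VERSION_1 was negotiated, or if legacy virtio is little-endian on this host. The full function, and the config-space accessor used at lines 521-522, presumably follow the standard virtio pattern:

	static bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
	{
		return virtio_legacy_is_little_endian() ||
		       (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
	}

	/* Assumed companion accessor (used at lines 521-522). */
	static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
	{
		return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
	}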

vdpasim_queue_ready():
120 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
122 	vringh_init_iotlb(&vq->vring, vdpasim_features,
124 			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
126 			  (uintptr_t)vq->driver_addr,
128 			  (uintptr_t)vq->device_addr);
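
Only alternating lines of the call matched. Assuming the vringh_init_iotlb() signature from drivers/vhost/vringh.c (ring size and weak-barriers flag before the desc/avail/used pointers), the complete call would be:

	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false,	/* no weak barriers */
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)(uintptr_t)vq->driver_addr,
			  (struct vring_used *)(uintptr_t)vq->device_addr);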

vdpasim_vq_reset():
133 	vq->ready = false;
134 	vq->desc_addr = 0;
135 	vq->driver_addr = 0;
136 	vq->device_addr = 0;
137 	vq->cb = NULL;
138 	vq->private = NULL;
139 	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
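
Line 139 is cut off by the match; reset presumably re-initializes the vring with no guest addresses at all, leaving the queue unusable until vdpasim_queue_ready() installs fresh ones:

	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false, NULL, NULL, NULL);	/* assumed completion */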

vdpasim_reset():
148 	vdpasim_vq_reset(&vdpasim->vqs[i]);
150 	spin_lock(&vdpasim->iommu_lock);
151 	vhost_iotlb_reset(vdpasim->iommu);
152 	spin_unlock(&vdpasim->iommu_lock);
154 	vdpasim->features = 0;
155 	vdpasim->status = 0;
156 	++vdpasim->generation;

vdpasim_work():
163 	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
164 	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
170 	spin_lock(&vdpasim->lock);
172 	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
175 	if (!txq->ready || !rxq->ready)
180 	err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
181 				   &txq->head, GFP_ATOMIC);
185 	err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
186 				   &rxq->head, GFP_ATOMIC);
188 	vringh_complete_iotlb(&txq->vring, txq->head, 0);
193 	read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
194 				     vdpasim->buffer,
199 	write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
200 				      vdpasim->buffer, read);
210 	vringh_complete_iotlb(&txq->vring, txq->head, 0);
211 	vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
217 	if (txq->cb)
218 		txq->cb(txq->private);
219 	if (rxq->cb)
220 		rxq->cb(rxq->private);
224 	schedule_work(&vdpasim->work);
230 	spin_unlock(&vdpasim->lock);
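
vdpasim_work() is the entire datapath: the device is a loopback NIC, so each buffer the driver posts on the TX ring (vqs[1]) is pulled through the PAGE_SIZE bounce buffer and pushed into the next RX buffer (vqs[0]), with every guest address translated by the IOTLB-aware vringh helpers. The matches hide the loop structure; a condensed sketch, in which the per-pass packet budget is an assumption inferred from the self-reschedule at line 224:

	/* Condensed sketch, not the verbatim kernel loop. */
	while (true) {
		/* lines 180-186: pull one TX head and one RX head; an empty
		 * TX ring ends this pass, and a missing RX buffer completes
		 * the TX descriptor with length 0 (line 188) and stops.
		 */

		/* lines 193-211: copy TX payload -> vdpasim->buffer -> RX
		 * buffer, then complete both heads (total_write bytes in).
		 */

		/* assumed budget so one pass cannot hog the workqueue */
		if (++pkts > 4) {
			schedule_work(&vdpasim->work);	/* line 224 */
			break;
		}
	}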

dir_to_perm():
235 	int perm = -EFAULT;
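
dir_to_perm() translates a DMA direction into vhost IOTLB access bits for the identity mappings below. The switch around line 235 did not match the query, but given its callers it presumably reads:

	static int dir_to_perm(enum dma_data_direction dir)
	{
		int perm = -EFAULT;	/* line 235: unknown directions fail */

		switch (dir) {
		case DMA_FROM_DEVICE:		/* device writes guest memory */
			perm = VHOST_MAP_WO;
			break;
		case DMA_TO_DEVICE:		/* device reads guest memory */
			perm = VHOST_MAP_RO;
			break;
		case DMA_BIDIRECTIONAL:
			perm = VHOST_MAP_RW;
			break;
		default:
			break;
		}

		return perm;
	}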

vdpasim_map_page():
260 	struct vhost_iotlb *iommu = vdpasim->iommu;
270 	spin_lock(&vdpasim->iommu_lock);
271 	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
273 	spin_unlock(&vdpasim->iommu_lock);
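
There is no real IOMMU here, so mapping a page just records an identity translation: the page's physical address serves as both the IOVA handed back to the caller and the translation target. Filling in the elided pieces (an assumption consistent with vdpasim_unmap_page() deleting the range keyed by dma_addr):

	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;

	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
				    pa, dir_to_perm(dir));	/* line 271, completed */
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)pa;	/* IOVA == physical address */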

vdpasim_unmap_page():
285 	struct vhost_iotlb *iommu = vdpasim->iommu;
287 	spin_lock(&vdpasim->iommu_lock);
288 	vhost_iotlb_del_range(iommu, (u64)dma_addr,
289 			      (u64)dma_addr + size - 1);
290 	spin_unlock(&vdpasim->iommu_lock);

vdpasim_alloc_coherent():
298 	struct vhost_iotlb *iommu = vdpasim->iommu;
302 	spin_lock(&vdpasim->iommu_lock);
308 	ret = vhost_iotlb_add_range(iommu, (u64)pa,
309 				    (u64)pa + size - 1,
318 	spin_unlock(&vdpasim->iommu_lock);

vdpasim_free_coherent():
328 	struct vhost_iotlb *iommu = vdpasim->iommu;
330 	spin_lock(&vdpasim->iommu_lock);
331 	vhost_iotlb_del_range(iommu, (u64)dma_addr,
332 			      (u64)dma_addr + size - 1);
333 	spin_unlock(&vdpasim->iommu_lock);
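
These four callbacks exist so the generic DMA API, when used against the simulator's struct device, lands in the simulated IOMMU instead of real hardware. They are presumably gathered into a dma_map_ops table that vdpasim_create() installs on the device:

	/* Assumed wiring; the table name is a guess. */
	static const struct dma_map_ops vdpasim_dma_ops = {
		.map_page	= vdpasim_map_page,
		.unmap_page	= vdpasim_unmap_page,
		.alloc		= vdpasim_alloc_coherent,
		.free		= vdpasim_free_coherent,
	};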

vdpasim_create():
353 	int ret = -ENOMEM;
364 	INIT_WORK(&vdpasim->work, vdpasim_work);
365 	spin_lock_init(&vdpasim->lock);
366 	spin_lock_init(&vdpasim->iommu_lock);
368 	dev = &vdpasim->vdpa.dev;
369 	dev->dma_mask = &dev->coherent_dma_mask;
374 	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
375 	if (!vdpasim->iommu)
378 	vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
379 	if (!vdpasim->buffer)
383 	mac_pton(macaddr, vdpasim->config.mac);
384 	if (!is_valid_ether_addr(vdpasim->config.mac)) {
385 		ret = -EADDRNOTAVAIL;
389 	eth_random_addr(vdpasim->config.mac);
392 	vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
393 	vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
395 	vdpasim->vdpa.dma_dev = dev;
396 	ret = vdpa_register_device(&vdpasim->vdpa);
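
vhost_iotlb_alloc(2048, 0) caps the simulated IOMMU at 2048 mappings with no flags, and the MAC block (lines 383-389) uses a `macaddr` module parameter when it parses to a valid address, falling back to eth_random_addr() otherwise. The gap between lines 369 and 374 presumably finishes the DMA setup sketched above:

	/* Assumed content of the elided lines 370-373. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;			/* hypothetical error label */
	set_dma_ops(dev, &vdpasim_dma_ops);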

vdpasim_set_vq_address():
413 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
415 	vq->desc_addr = desc_area;
416 	vq->driver_addr = driver_area;
417 	vq->device_addr = device_area;

vdpasim_set_vq_num():
425 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
427 	vq->num = num;

vdpasim_kick_vq():
433 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
435 	if (vq->ready)
436 		schedule_work(&vdpasim->work);

vdpasim_set_vq_cb():
443 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
445 	vq->cb = cb->callback;
446 	vq->private = cb->private;

vdpasim_set_vq_ready():
452 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
454 	spin_lock(&vdpasim->lock);
455 	vq->ready = ready;
456 	if (vq->ready)
458 	spin_unlock(&vdpasim->lock);

vdpasim_get_vq_ready():
464 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
466 	return vq->ready;

vdpasim_set_vq_state():
473 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
474 	struct vringh *vrh = &vq->vring;
476 	spin_lock(&vdpasim->lock);
477 	vrh->last_avail_idx = state->avail_index;
478 	spin_unlock(&vdpasim->lock);

vdpasim_get_vq_state():
487 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
488 	struct vringh *vrh = &vq->vring;
490 	state->avail_index = vrh->last_avail_idx;
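
The get/set pair exists so a management plane can save and restore a queue, e.g. across migration; for a split virtqueue the only device-side state is the next available index, mirrored straight into the vringh. The state argument is presumably the minimal vdpa type:

	/* Assumed shape of the state parameter (include/linux/vdpa.h). */
	struct vdpa_vq_state {
		u16 avail_index;	/* mirrors vringh->last_avail_idx */
	};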

vdpasim_set_features():
507 	struct virtio_net_config *config = &vdpasim->config;
511 		return -EINVAL;
513 	vdpasim->features = features & vdpasim_features;
521 	config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
522 	config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
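
The -EINVAL at line 511 presumably rejects drivers that did not negotiate the platform-IOMMU feature: every address this device touches must go through its IOTLB, so the guard would be something like:

	/* Assumed check behind line 511; the bit was formerly named
	 * VIRTIO_F_IOMMU_PLATFORM. DMA mapping must be done by the driver.
	 */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;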

vdpasim_get_status():
552 	spin_lock(&vdpasim->lock);
553 	status = vdpasim->status;
554 	spin_unlock(&vdpasim->lock);

vdpasim_set_status():
563 	spin_lock(&vdpasim->lock);
564 	vdpasim->status = status;
567 	spin_unlock(&vdpasim->lock);

vdpasim_get_config():
576 	memcpy(buf, (u8 *)&vdpasim->config + offset, len);

vdpasim_get_generation():
589 	return vdpasim->generation;

vdpasim_set_map():
606 	struct vhost_iotlb_map *map;
607 	u64 start = 0ULL, last = 0ULL - 1;
610 	spin_lock(&vdpasim->iommu_lock);
611 	vhost_iotlb_reset(vdpasim->iommu);
613 	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
614 	     map = vhost_iotlb_itree_next(map, start, last)) {
615 		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
616 					    map->last, map->addr, map->perm);
620 	spin_unlock(&vdpasim->iommu_lock);
624 	vhost_iotlb_reset(vdpasim->iommu);
625 	spin_unlock(&vdpasim->iommu_lock);

vdpasim_dma_map():
635 	spin_lock(&vdpasim->iommu_lock);
636 	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
638 	spin_unlock(&vdpasim->iommu_lock);

vdpasim_dma_unmap():
647 	spin_lock(&vdpasim->iommu_lock);
648 	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
649 	spin_unlock(&vdpasim->iommu_lock);
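
vdpasim_set_map() swaps in a caller-provided table wholesale, walking the interval tree over the full u64 range (0ULL - 1 is U64_MAX) and wiping the table again on failure (lines 624-625), while vdpasim_dma_map()/vdpasim_dma_unmap() update one range at a time. Which style the device advertises is presumably what batch_mapping (line 39) selects when vdpasim_create() picks its config ops:

	/* Assumed selection; the ops-struct names are guesses. */
	const struct vdpa_config_ops *ops;

	if (batch_mapping)
		ops = &vdpasim_net_batch_config_ops;	/* .set_map */
	else
		ops = &vdpasim_net_config_ops;		/* .dma_map/.dma_unmap */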

vdpasim_free():
658 	cancel_work_sync(&vdpasim->work);
659 	kfree(vdpasim->buffer);
660 	if (vdpasim->iommu)
661 		vhost_iotlb_free(vdpasim->iommu);

vdpasim_dev_exit():
729 	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;
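
Module teardown is presumably just the unregister call, which drops the last reference and lets the vdpa core invoke vdpasim_free() above:

	static void __exit vdpasim_dev_exit(void)
	{
		struct vdpa_device *vdpa = &vdpasim_dev->vdpa;	/* line 729 */

		vdpa_unregister_device(vdpa);
	}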