Lines matching refs: viommu

64 struct viommu_dev *viommu; member
78 struct viommu_dev *viommu; member
136 static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, in viommu_get_write_desc_offset() argument
143 return len - viommu->probe_size - tail_size; in viommu_get_write_desc_offset()
154 static int __viommu_sync_req(struct viommu_dev *viommu) in __viommu_sync_req() argument
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_sync_req()
161 assert_spin_locked(&viommu->request_lock); in __viommu_sync_req()
165 while (!list_empty(&viommu->requests)) { in __viommu_sync_req()
187 static int viommu_sync_req(struct viommu_dev *viommu) in viommu_sync_req() argument
192 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_sync_req()
193 ret = __viommu_sync_req(viommu); in viommu_sync_req()
195 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_sync_req()
196 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_sync_req()
217 static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len, in __viommu_add_req() argument
225 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_add_req()
227 assert_spin_locked(&viommu->request_lock); in __viommu_add_req()
229 write_offset = viommu_get_write_desc_offset(viommu, buf, len); in __viommu_add_req()
250 if (!__viommu_sync_req(viommu)) in __viommu_add_req()
256 list_add_tail(&req->list, &viommu->requests); in __viommu_add_req()
264 static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len) in viommu_add_req() argument
269 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_add_req()
270 ret = __viommu_add_req(viommu, buf, len, false); in viommu_add_req()
272 dev_dbg(viommu->dev, "could not add request: %d\n", ret); in viommu_add_req()
273 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_add_req()
282 static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf, in viommu_send_req_sync() argument
288 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_send_req_sync()
290 ret = __viommu_add_req(viommu, buf, len, true); in viommu_send_req_sync()
292 dev_dbg(viommu->dev, "could not add request (%d)\n", ret); in viommu_send_req_sync()
296 ret = __viommu_sync_req(viommu); in viommu_send_req_sync()
298 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_send_req_sync()
304 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_send_req_sync()
454 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_replay_mappings()
514 static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) in viommu_probe_endpoint() argument
528 probe_len = sizeof(*probe) + viommu->probe_size + in viommu_probe_endpoint()
541 ret = viommu_send_req_sync(viommu, probe, probe_len); in viommu_probe_endpoint()
549 cur < viommu->probe_size) { in viommu_probe_endpoint()
564 if (cur >= viommu->probe_size) in viommu_probe_endpoint()
576 static int viommu_fault_handler(struct viommu_dev *viommu, in viommu_fault_handler() argument
601 dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", in viommu_fault_handler()
607 dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", in viommu_fault_handler()
618 struct viommu_dev *viommu = vq->vdev->priv; in viommu_event_handler() local
622 dev_err(viommu->dev, in viommu_event_handler()
626 viommu_fault_handler(viommu, &evt->fault); in viommu_event_handler()
632 dev_err(viommu->dev, "could not add event buffer\n"); in viommu_event_handler()
665 struct viommu_dev *viommu = vdev->viommu; in viommu_domain_finalise() local
668 viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap); in viommu_domain_finalise()
676 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, in viommu_domain_finalise()
677 viommu->last_domain, GFP_KERNEL); in viommu_domain_finalise()
683 domain->pgsize_bitmap = viommu->pgsize_bitmap; in viommu_domain_finalise()
684 domain->geometry = viommu->geometry; in viommu_domain_finalise()
686 vdomain->map_flags = viommu->map_flags; in viommu_domain_finalise()
687 vdomain->viommu = viommu; in viommu_domain_finalise()
690 if (virtio_has_feature(viommu->vdev, in viommu_domain_finalise()
698 ida_free(&viommu->domain_ids, vdomain->id); in viommu_domain_finalise()
699 vdomain->viommu = NULL; in viommu_domain_finalise()
714 if (vdomain->viommu) in viommu_domain_free()
715 ida_free(&vdomain->viommu->domain_ids, vdomain->id); in viommu_domain_free()
730 if (!vdomain->viommu) { in viommu_attach_dev()
736 } else if (vdomain->viommu != vdev->viommu) { in viommu_attach_dev()
771 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); in viommu_attach_dev()
826 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_map_pages()
860 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); in viommu_unmap_pages()
889 viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync()
946 struct viommu_dev *viommu = NULL; in viommu_probe_device() local
952 viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); in viommu_probe_device()
953 if (!viommu) in viommu_probe_device()
961 vdev->viommu = viommu; in viommu_probe_device()
965 if (viommu->probe_size) { in viommu_probe_device()
967 ret = viommu_probe_endpoint(viommu, dev); in viommu_probe_device()
972 return &viommu->iommu; in viommu_probe_device()
1041 static int viommu_init_vqs(struct viommu_dev *viommu) in viommu_init_vqs() argument
1043 struct virtio_device *vdev = dev_to_virtio(viommu->dev); in viommu_init_vqs()
1050 return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks, in viommu_init_vqs()
1054 static int viommu_fill_evtq(struct viommu_dev *viommu) in viommu_fill_evtq() argument
1059 struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; in viommu_fill_evtq()
1062 viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, in viommu_fill_evtq()
1080 struct viommu_dev *viommu = NULL; in viommu_probe() local
1090 viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL); in viommu_probe()
1091 if (!viommu) in viommu_probe()
1094 spin_lock_init(&viommu->request_lock); in viommu_probe()
1095 ida_init(&viommu->domain_ids); in viommu_probe()
1096 viommu->dev = dev; in viommu_probe()
1097 viommu->vdev = vdev; in viommu_probe()
1098 INIT_LIST_HEAD(&viommu->requests); in viommu_probe()
1100 ret = viommu_init_vqs(viommu); in viommu_probe()
1105 &viommu->pgsize_bitmap); in viommu_probe()
1107 if (!viommu->pgsize_bitmap) { in viommu_probe()
1112 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; in viommu_probe()
1113 viommu->last_domain = ~0U; in viommu_probe()
1126 &viommu->first_domain); in viommu_probe()
1130 &viommu->last_domain); in viommu_probe()
1134 &viommu->probe_size); in viommu_probe()
1136 viommu->geometry = (struct iommu_domain_geometry) { in viommu_probe()
1143 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; in viommu_probe()
1145 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; in viommu_probe()
1150 ret = viommu_fill_evtq(viommu); in viommu_probe()
1154 ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", in viommu_probe()
1159 iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); in viommu_probe()
1161 vdev->priv = viommu; in viommu_probe()
1164 order_base_2(viommu->geometry.aperture_end)); in viommu_probe()
1165 dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); in viommu_probe()
1177 struct viommu_dev *viommu = vdev->priv; in viommu_remove() local
1179 iommu_device_sysfs_remove(&viommu->iommu); in viommu_remove()
1180 iommu_device_unregister(&viommu->iommu); in viommu_remove()
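
Most of the references above funnel through one request pattern: a caller builds a fixed-size request structure, __viommu_add_req() queues it on VIOMMU_REQUEST_VQ under request_lock, and __viommu_sync_req() kicks the queue and waits until the device has returned the pending buffers and written each request's tail status. The sketch below illustrates that pattern for a MAP request, in the spirit of the viommu_send_req_sync() calls listed at lines 454 and 826. It is a simplified illustration, not a verbatim excerpt of the driver: it would only build inside drivers/iommu/virtio-iommu.c, where struct viommu_domain and viommu_send_req_sync() are private definitions, and example_map_range() is a hypothetical helper name.

/*
 * Sketch: build a VIRTIO_IOMMU_T_MAP request and send it synchronously.
 * Field names follow the uapi layout of struct virtio_iommu_req_map;
 * vdomain->id and vdomain->viommu are members of the driver-private
 * struct viommu_domain, as seen in the listing above.
 */
#include <linux/types.h>
#include <uapi/linux/virtio_iommu.h>

static int example_map_range(struct viommu_domain *vdomain,
			     u64 iova, u64 paddr, u64 size, u32 flags)
{
	struct virtio_iommu_req_map map = {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + size - 1),
		.phys_start	= cpu_to_le64(paddr),
		.flags		= cpu_to_le32(flags),
	};

	/*
	 * viommu_send_req_sync() takes request_lock, queues the buffer with
	 * __viommu_add_req(), then lets __viommu_sync_req() drain the
	 * request list so the device's tail status is available on return.
	 */
	return viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
}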