Lines Matching full:viommu

65 struct viommu_dev *viommu; member
66 struct mutex mutex; /* protects viommu pointer */
78 struct viommu_dev *viommu; member
136 static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, in viommu_get_write_desc_offset() argument
143 return len - viommu->probe_size - tail_size; in viommu_get_write_desc_offset()
154 static int __viommu_sync_req(struct viommu_dev *viommu) in __viommu_sync_req() argument
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_sync_req()
161 assert_spin_locked(&viommu->request_lock); in __viommu_sync_req()
165 while (!list_empty(&viommu->requests)) { in __viommu_sync_req()
187 static int viommu_sync_req(struct viommu_dev *viommu) in viommu_sync_req() argument
192 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_sync_req()
193 ret = __viommu_sync_req(viommu); in viommu_sync_req()
195 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_sync_req()
196 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_sync_req()
217 static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len, in __viommu_add_req() argument
225 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; in __viommu_add_req()
227 assert_spin_locked(&viommu->request_lock); in __viommu_add_req()
229 write_offset = viommu_get_write_desc_offset(viommu, buf, len); in __viommu_add_req()
250 if (!__viommu_sync_req(viommu)) in __viommu_add_req()
256 list_add_tail(&req->list, &viommu->requests); in __viommu_add_req()
264 static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len) in viommu_add_req() argument
269 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_add_req()
270 ret = __viommu_add_req(viommu, buf, len, false); in viommu_add_req()
272 dev_dbg(viommu->dev, "could not add request: %d\n", ret); in viommu_add_req()
273 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_add_req()
282 static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf, in viommu_send_req_sync() argument
288 spin_lock_irqsave(&viommu->request_lock, flags); in viommu_send_req_sync()
290 ret = __viommu_add_req(viommu, buf, len, true); in viommu_send_req_sync()
292 dev_dbg(viommu->dev, "could not add request (%d)\n", ret); in viommu_send_req_sync()
296 ret = __viommu_sync_req(viommu); in viommu_send_req_sync()
298 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); in viommu_send_req_sync()
304 spin_unlock_irqrestore(&viommu->request_lock, flags); in viommu_send_req_sync()
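
The __viommu_add_req()/__viommu_sync_req() lines above (154-304) show the driver's request batching: callers queue requests on viommu->requests under request_lock, and a sync step flushes the whole batch to the request virtqueue and waits for completion. The standalone C sketch below models only that batching shape; the names (pending_req, add_req, sync_req) are invented for illustration and none of the virtio API appears here.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a queued request. */
struct pending_req {
	int id;
	struct pending_req *next;
};

static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending_req *req_head, *req_tail;	/* protected by request_lock */

/* Queue a request; callers may batch several before syncing. */
static int add_req(int id)
{
	struct pending_req *req = calloc(1, sizeof(*req));

	if (!req)
		return -1;
	req->id = id;

	pthread_mutex_lock(&request_lock);
	if (req_tail)
		req_tail->next = req;
	else
		req_head = req;
	req_tail = req;
	pthread_mutex_unlock(&request_lock);
	return 0;
}

/* Flush every pending request and wait for each to complete. */
static void sync_req(void)
{
	pthread_mutex_lock(&request_lock);
	while (req_head) {
		struct pending_req *req = req_head;

		req_head = req->next;
		/* In the driver this is where the request virtqueue is
		 * kicked and the device's reply is awaited. */
		printf("completed request %d\n", req->id);
		free(req);
	}
	req_tail = NULL;
	pthread_mutex_unlock(&request_lock);
}

int main(void)
{
	add_req(1);
	add_req(2);	/* e.g. several UNMAP requests batched together */
	sync_req();	/* the sync step flushes the whole batch */
	return 0;
}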
407 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_replay_mappings()
460 static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) in viommu_probe_endpoint() argument
474 probe_len = sizeof(*probe) + viommu->probe_size + in viommu_probe_endpoint()
487 ret = viommu_send_req_sync(viommu, probe, probe_len); in viommu_probe_endpoint()
495 cur < viommu->probe_size) { in viommu_probe_endpoint()
503 dev_err(dev, "unknown viommu prop 0x%x\n", type); in viommu_probe_endpoint()
507 dev_err(dev, "failed to parse viommu prop 0x%x\n", type); in viommu_probe_endpoint()
510 if (cur >= viommu->probe_size) in viommu_probe_endpoint()
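
Lines 474-510 suggest viommu_probe_endpoint() sends a PROBE request sized by viommu->probe_size and then walks the returned buffer property by property, warning on unknown or unparsable types. A minimal sketch of that kind of type/length walk follows; the prop_head layout and the end-of-list convention are assumptions for illustration, not the virtio-iommu wire format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed property header layout, for illustration only. */
struct prop_head {
	uint16_t type;
	uint16_t length;	/* payload bytes following the header */
};

/* Walk a probe buffer of type/length properties until it is exhausted. */
static void parse_props(const uint8_t *buf, size_t probe_size)
{
	size_t cur = 0;

	while (cur + sizeof(struct prop_head) <= probe_size) {
		struct prop_head head;

		memcpy(&head, buf + cur, sizeof(head));
		if (!head.type)			/* end marker (assumption) */
			break;

		switch (head.type) {
		case 1:
			printf("property 0x1, %u payload bytes\n",
			       (unsigned)head.length);
			break;
		default:
			/* cf. "unknown viommu prop" on line 503 */
			printf("unknown prop 0x%x\n", (unsigned)head.type);
			break;
		}

		cur += sizeof(head) + head.length;
	}
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct prop_head head = { .type = 1, .length = 4 };

	memcpy(buf, &head, sizeof(head));	/* one property, then zeros */
	parse_props(buf, sizeof(buf));
	return 0;
}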
522 static int viommu_fault_handler(struct viommu_dev *viommu, in viommu_fault_handler() argument
547 dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", in viommu_fault_handler()
553 dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", in viommu_fault_handler()
564 struct viommu_dev *viommu = vq->vdev->priv; in viommu_event_handler() local
568 dev_err(viommu->dev, in viommu_event_handler()
572 viommu_fault_handler(viommu, &evt->fault); in viommu_event_handler()
578 dev_err(viommu->dev, "could not add event buffer\n"); in viommu_event_handler()
609 struct viommu_dev *viommu = vdev->viommu; in viommu_domain_finalise() local
612 viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap); in viommu_domain_finalise()
620 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, in viommu_domain_finalise()
621 viommu->last_domain, GFP_KERNEL); in viommu_domain_finalise()
627 domain->pgsize_bitmap = viommu->pgsize_bitmap; in viommu_domain_finalise()
628 domain->geometry = viommu->geometry; in viommu_domain_finalise()
630 vdomain->map_flags = viommu->map_flags; in viommu_domain_finalise()
631 vdomain->viommu = viommu; in viommu_domain_finalise()
643 if (vdomain->viommu) in viommu_domain_free()
644 ida_free(&vdomain->viommu->domain_ids, vdomain->id); in viommu_domain_free()
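
viommu_domain_finalise() (609-631) allocates a domain ID from the [first_domain, last_domain] range reported by the device and copies the page-size bitmap, geometry and map flags into the domain; viommu_domain_free() (643-644) gives the ID back. The sketch below is a toy stand-in for that range-limited allocator, not a reproduction of the kernel's ida_alloc_range()/ida_free().

#include <stdbool.h>
#include <stdio.h>

#define FIRST_DOMAIN 1
#define LAST_DOMAIN  64		/* range advertised by the device (example) */

static bool domain_used[LAST_DOMAIN + 1];

/* Allocate the lowest free ID in [FIRST_DOMAIN, LAST_DOMAIN], or -1. */
static int domain_id_alloc(void)
{
	for (int id = FIRST_DOMAIN; id <= LAST_DOMAIN; id++) {
		if (!domain_used[id]) {
			domain_used[id] = true;
			return id;
		}
	}
	return -1;
}

static void domain_id_free(int id)
{
	if (id >= FIRST_DOMAIN && id <= LAST_DOMAIN)
		domain_used[id] = false;
}

int main(void)
{
	int id = domain_id_alloc();	/* done once, when the domain is finalised */

	printf("got domain %d\n", id);
	domain_id_free(id);		/* viommu_domain_free() counterpart */
	return 0;
}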
659 if (!vdomain->viommu) { in viommu_attach_dev()
661 * Properly initialize the domain now that we know which viommu in viommu_attach_dev()
665 } else if (vdomain->viommu != vdev->viommu) { in viommu_attach_dev()
666 dev_err(dev, "cannot attach to foreign vIOMMU\n"); in viommu_attach_dev()
697 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); in viommu_attach_dev()
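
The attach path (659-697) finalises the domain lazily on first attach, once the owning viommu is known, refuses endpoints that belong to a different viommu instance, and then sends the ATTACH request. A minimal model of that lazy-initialise-then-check-owner pattern, with invented types:

#include <stdio.h>

struct owner      { const char *name; };
struct dev_ctx    { struct owner *owner; };
struct domain_ctx { struct owner *owner; };	/* NULL until first attach */

static int attach(struct domain_ctx *dom, struct dev_ctx *dev)
{
	if (!dom->owner) {
		/* First attach: bind the domain to this device's owner. */
		dom->owner = dev->owner;
	} else if (dom->owner != dev->owner) {
		/* Mirrors "cannot attach to foreign vIOMMU" on line 666. */
		fprintf(stderr, "cannot attach to foreign owner\n");
		return -1;
	}
	/* ...send the ATTACH request, replay existing mappings... */
	return 0;
}

int main(void)
{
	struct owner a = { "viommu0" }, b = { "viommu1" };
	struct dev_ctx d0 = { &a }, d1 = { &b };
	struct domain_ctx dom = { 0 };

	attach(&dom, &d0);			/* finalises against viommu0 */
	return attach(&dom, &d1) ? 1 : 0;	/* rejected: foreign owner */
}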
749 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); in viommu_map()
779 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); in viommu_unmap()
808 viommu_sync_req(vdomain->viommu); in viommu_iotlb_sync()
864 struct viommu_dev *viommu = NULL; in viommu_probe_device() local
870 viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); in viommu_probe_device()
871 if (!viommu) in viommu_probe_device()
879 vdev->viommu = viommu; in viommu_probe_device()
883 if (viommu->probe_size) { in viommu_probe_device()
885 ret = viommu_probe_endpoint(viommu, dev); in viommu_probe_device()
890 return &viommu->iommu; in viommu_probe_device()
953 static int viommu_init_vqs(struct viommu_dev *viommu) in viommu_init_vqs() argument
955 struct virtio_device *vdev = dev_to_virtio(viommu->dev); in viommu_init_vqs()
962 return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks, in viommu_init_vqs()
966 static int viommu_fill_evtq(struct viommu_dev *viommu) in viommu_fill_evtq() argument
971 struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; in viommu_fill_evtq()
974 viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, in viommu_fill_evtq()
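
viommu_fill_evtq() (966-974) pre-allocates an array of event buffers and posts each one to the event virtqueue so the device always has somewhere to write a fault; viommu_event_handler() (564-578) consumes a buffer and re-adds it. The toy sketch below keeps only that stock-and-repost idea, with a flag array standing in for the virtqueue:

#include <stdio.h>

#define NR_EVTS 8

struct fault_evt {
	unsigned int endpoint;
	unsigned long long address;
};

/* Buffers owned by the "device" while posted; a real driver hands them
 * to the event virtqueue instead of flipping a flag. */
static struct fault_evt evts[NR_EVTS];
static int posted[NR_EVTS];

/* Stock the queue so the device always has a buffer to write into. */
static void fill_evtq(void)
{
	for (int i = 0; i < NR_EVTS; i++)
		posted[i] = 1;
}

/* Called when buffer i comes back carrying a fault report. */
static void event_handler(int i)
{
	posted[i] = 0;
	fprintf(stderr, "fault from EP %u at %#llx\n",
		evts[i].endpoint, evts[i].address);
	posted[i] = 1;		/* re-post the buffer, as the driver does */
}

int main(void)
{
	fill_evtq();
	evts[3] = (struct fault_evt){ .endpoint = 7, .address = 0x1000 };
	event_handler(3);
	return 0;
}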
992 struct viommu_dev *viommu = NULL; in viommu_probe() local
1002 viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL); in viommu_probe()
1003 if (!viommu) in viommu_probe()
1006 spin_lock_init(&viommu->request_lock); in viommu_probe()
1007 ida_init(&viommu->domain_ids); in viommu_probe()
1008 viommu->dev = dev; in viommu_probe()
1009 viommu->vdev = vdev; in viommu_probe()
1010 INIT_LIST_HEAD(&viommu->requests); in viommu_probe()
1012 ret = viommu_init_vqs(viommu); in viommu_probe()
1017 &viommu->pgsize_bitmap); in viommu_probe()
1019 if (!viommu->pgsize_bitmap) { in viommu_probe()
1024 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; in viommu_probe()
1025 viommu->last_domain = ~0U; in viommu_probe()
1038 &viommu->first_domain); in viommu_probe()
1042 &viommu->last_domain); in viommu_probe()
1046 &viommu->probe_size); in viommu_probe()
1048 viommu->geometry = (struct iommu_domain_geometry) { in viommu_probe()
1055 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; in viommu_probe()
1057 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; in viommu_probe()
1062 ret = viommu_fill_evtq(viommu); in viommu_probe()
1066 ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", in viommu_probe()
1071 iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); in viommu_probe()
1093 vdev->priv = viommu; in viommu_probe()
1096 order_base_2(viommu->geometry.aperture_end)); in viommu_probe()
1097 dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); in viommu_probe()
1102 iommu_device_sysfs_remove(&viommu->iommu); in viommu_probe()
1103 iommu_device_unregister(&viommu->iommu); in viommu_probe()
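
The probe path (992-1103) follows the usual init-then-unwind shape: allocate and initialise the viommu state (lock, domain IDA, request list), set up the virtqueues, read the config (page-size bitmap, domain ID range, probe size), fill the event queue, add the sysfs entry and register with the IOMMU core, then tear the later steps down in reverse on failure (1102-1103). A compressed standalone model of that goto-based unwinding, with placeholder step functions:

#include <stdio.h>
#include <stdlib.h>

/* Each step is a placeholder; return non-zero to simulate a failure. */
static int init_vqs(void)	{ return 0; }
static int read_config(void)	{ return 0; }
static int fill_evtq(void)	{ return 0; }
static int sysfs_add(void)	{ return 0; }
static int dev_register(void)	{ return 0; }
static void sysfs_remove(void)	{ puts("sysfs removed"); }

static int probe(void)
{
	int ret;

	ret = init_vqs();
	if (ret)
		return ret;
	ret = read_config();
	if (ret)
		return ret;
	ret = fill_evtq();
	if (ret)
		return ret;
	ret = sysfs_add();
	if (ret)
		return ret;
	ret = dev_register();
	if (ret)
		goto err_sysfs_remove;	/* unwind in reverse order */

	return 0;

err_sysfs_remove:
	sysfs_remove();
	return ret;
}

int main(void)
{
	return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}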
1112 struct viommu_dev *viommu = vdev->priv; in viommu_remove() local
1114 iommu_device_sysfs_remove(&viommu->iommu); in viommu_remove()
1115 iommu_device_unregister(&viommu->iommu); in viommu_remove()