Lines matching refs:svm — cross-reference search listing. The functions below appear to belong to the Intel VT-d shared virtual memory driver (drivers/iommu/intel/svm.c in the Linux kernel); the leading numbers are source line numbers in that file, and the trailing "in ...()" / "argument" / "local" annotations are the search tool's context tags.

121 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,  in intel_flush_svm_range_dev()  argument
127 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | in intel_flush_svm_range_dev()
135 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | in intel_flush_svm_range_dev()
145 qi_submit_sync(svm->iommu, &desc, 1, 0); in intel_flush_svm_range_dev()
148 desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) | in intel_flush_svm_range_dev()
169 qi_submit_sync(svm->iommu, &desc, 1, 0); in intel_flush_svm_range_dev()
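The cluster above is the per-device flush primitive. Read together, the matched lines show two different invalidation descriptors built from the same svm->pasid: an IOTLB invalidation (lines 127/135) and, for ATS-capable devices, a device-TLB invalidation (line 148), each submitted synchronously through the invalidation queue. A condensed, non-verbatim sketch of that flow; the granularity, DID/SID/QDEP fields, and the qw1 encoding are assumptions drawn from the QI_* macro families:

    /* Sketch only: flush one device's translations for svm->pasid.
     * Condensed from the matched lines, not the verbatim kernel code. */
    static void sketch_flush_svm_range_dev(struct intel_svm *svm,
                                           struct intel_svm_dev *sdev,
                                           unsigned long address,
                                           unsigned long pages, int ih)
    {
        struct qi_desc desc = { 0 };

        /* PASID-based IOTLB invalidation (cf. lines 127/135: both the
         * flush-all and flush-range variants start from the same
         * QI_EIOTLB_PASID(svm->pasid) prefix; for a ranged flush the
         * address/mask/ih bits would be encoded into desc.qw1). */
        desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_TYPE;
        qi_submit_sync(svm->iommu, &desc, 1, 0);     /* line 145 */

        if (sdev->dev_iotlb) {   /* field name assumed */
            /* ATS: also shoot down the device-side TLB (cf. line 148). */
            desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_TYPE;
            qi_submit_sync(svm->iommu, &desc, 1, 0); /* line 169 */
        }
    }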
173 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, in intel_flush_svm_range() argument
179 list_for_each_entry_rcu(sdev, &svm->devs, list) in intel_flush_svm_range()
180 intel_flush_svm_range_dev(svm, sdev, address, pages, ih); in intel_flush_svm_range()
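The range flush itself is just a fan-out: walk every device bound to this PASID and invoke the per-device primitive. Reconstructed from the two matched lines; the rcu_read_lock()/rcu_read_unlock() pair is an assumption, but list_for_each_entry_rcu() requires a read-side critical section:

    static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
                                      unsigned long pages, int ih)
    {
        struct intel_svm_dev *sdev;

        rcu_read_lock();    /* assumed: _rcu iteration needs RCU protection */
        list_for_each_entry_rcu(sdev, &svm->devs, list)
            intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
        rcu_read_unlock();
    }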
189 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); in intel_invalidate_range() local
191 intel_flush_svm_range(svm, start, in intel_invalidate_range()
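intel_invalidate_range() is the MMU-notifier hook: container_of() recovers the intel_svm from its embedded notifier, and the CPU-side byte range [start, end) is converted into a page count for the IOMMU flush. A sketch; the exact rounding arithmetic is an assumption consistent with the two matched lines:

    static void intel_invalidate_range(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long start, unsigned long end)
    {
        /* The notifier is embedded in struct intel_svm (line 189). */
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

        /* Assumed conversion: bytes -> VT-d pages, rounding up. */
        intel_flush_svm_range(svm, start,
                              (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
    }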
197 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); in intel_mm_release() local
213 list_for_each_entry_rcu(sdev, &svm->devs, list) in intel_mm_release()
214 intel_pasid_tear_down_entry(svm->iommu, sdev->dev, in intel_mm_release()
215 svm->pasid, true); in intel_mm_release()
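When the owning process's mm goes away, intel_mm_release() does not flush ranges; it tears down the PASID entry for every bound device, since faulting is meaningless once the page tables are being discarded. Likely shape, with the RCU read section again assumed:

    static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
        struct intel_svm_dev *sdev;

        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
            /* 'true' at line 215 is taken to mean "ignore faults during
             * teardown" (parameter meaning assumed). */
            intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
                                        svm->pasid, true);
        rcu_read_unlock();
    }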
228 #define for_each_svm_dev(sdev, svm, d) \ argument
229 list_for_each_entry((sdev), &(svm)->devs, list) \
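Only the first two lines of this macro contain the string "svm", so the search truncates it; the body almost certainly continues with a device filter. A likely completion and a usage sketch follow. The trailing `if (...) {} else` idiom skips non-matching entries while letting the macro still bind cleanly to a following statement or block:

    #define for_each_svm_dev(sdev, svm, d)                      \
        list_for_each_entry((sdev), &(svm)->devs, list)         \
            if ((d) != (sdev)->dev) {} else
    /* third macro line above is an assumed completion */

    /* Usage as at line 530, inside a function holding the right locks:
     * visit only the list entries whose ->dev equals dev. */
    for_each_svm_dev(sdev, svm, dev) {
        sdev->users++;    /* hypothetical body */
        break;
    }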
237 struct intel_svm *svm; in pasid_to_svm_sdev() local
246 svm = ioasid_find(NULL, pasid, NULL); in pasid_to_svm_sdev()
247 if (IS_ERR(svm)) in pasid_to_svm_sdev()
248 return PTR_ERR(svm); in pasid_to_svm_sdev()
250 if (!svm) in pasid_to_svm_sdev()
257 if (WARN_ON(list_empty(&svm->devs))) in pasid_to_svm_sdev()
261 list_for_each_entry_rcu(d, &svm->devs, list) { in pasid_to_svm_sdev()
270 *rsvm = svm; in pasid_to_svm_sdev()
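pasid_to_svm_sdev() resolves a PASID back to its intel_svm and, if the given device is bound to it, the matching intel_svm_dev. The matched lines give most of the logic: ioasid_find() may return an ERR_PTR (propagated), NULL (PASID unknown; still success, with *rsvm left NULL), or a live svm whose device list is then searched. A condensed sketch; the rsdev out-parameter handling is an assumption symmetric with line 270:

    static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
                                 struct intel_svm **rsvm,
                                 struct intel_svm_dev **rsdev)
    {
        struct intel_svm_dev *d, *sdev = NULL;
        struct intel_svm *svm;

        svm = ioasid_find(NULL, pasid, NULL);   /* line 246 */
        if (IS_ERR(svm))
            return PTR_ERR(svm);
        if (!svm)
            goto out;                /* unknown PASID: report it, not an error */

        /* A live PASID must have at least one bound device (line 257). */
        if (WARN_ON(list_empty(&svm->devs)))
            return -EINVAL;

        list_for_each_entry_rcu(d, &svm->devs, list) {
            if (d->dev == dev) {     /* assumed match condition */
                sdev = d;
                break;
            }
        }
    out:
        *rsvm = svm;
        *rsdev = sdev;               /* assumed, mirroring *rsvm at line 270 */
        return 0;
    }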
283 struct intel_svm *svm = NULL; in intel_svm_bind_gpasid() local
321 ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev); in intel_svm_bind_gpasid()
332 svm->pasid); in intel_svm_bind_gpasid()
337 if (!svm) { in intel_svm_bind_gpasid()
339 svm = kzalloc(sizeof(*svm), GFP_KERNEL); in intel_svm_bind_gpasid()
340 if (!svm) { in intel_svm_bind_gpasid()
349 svm->mm = get_task_mm(current); in intel_svm_bind_gpasid()
350 svm->pasid = data->hpasid; in intel_svm_bind_gpasid()
352 svm->gpasid = data->gpasid; in intel_svm_bind_gpasid()
353 svm->flags |= SVM_FLAG_GUEST_PASID; in intel_svm_bind_gpasid()
355 ioasid_set_data(data->hpasid, svm); in intel_svm_bind_gpasid()
356 INIT_LIST_HEAD_RCU(&svm->devs); in intel_svm_bind_gpasid()
357 mmput(svm->mm); in intel_svm_bind_gpasid()
402 svm->flags |= SVM_FLAG_GUEST_MODE; in intel_svm_bind_gpasid()
405 list_add_rcu(&sdev->list, &svm->devs); in intel_svm_bind_gpasid()
407 if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) { in intel_svm_bind_gpasid()
409 kfree(svm); in intel_svm_bind_gpasid()
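intel_svm_bind_gpasid() is the guest-PASID bind path. From the matched lines: an existing svm for data->hpasid is reused; otherwise a new one is allocated, populated from the bind data, published via ioasid_set_data() so ioasid_find() can see it, and the new sdev is added to its device list. If binding fails before any device was attached, the allocation is unwound (lines 407-409). A heavily condensed sketch of the body; locking, the sdev setup, and the PASID-table programming are elided, and the IOMMU_SVA_GPASID_VAL flag name is an assumption:

    /* Condensed sketch inside intel_svm_bind_gpasid(); error paths abbreviated. */
    ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
    if (ret)
        goto out;
    if (sdev)
        goto out;                    /* device already bound (cf. line 332) */

    if (!svm) {
        svm = kzalloc(sizeof(*svm), GFP_KERNEL);
        if (!svm) {
            ret = -ENOMEM;
            goto out;
        }
        svm->mm = get_task_mm(current);       /* host mm backing this PASID */
        svm->pasid = data->hpasid;
        if (data->flags & IOMMU_SVA_GPASID_VAL) {    /* assumed flag */
            svm->gpasid = data->gpasid;
            svm->flags |= SVM_FLAG_GUEST_PASID;
        }
        ioasid_set_data(data->hpasid, svm);   /* publish for ioasid_find() */
        INIT_LIST_HEAD_RCU(&svm->devs);
        mmput(svm->mm);
    }
    /* ... allocate sdev, program the guest PASID table entry ... */
    svm->flags |= SVM_FLAG_GUEST_MODE;
    list_add_rcu(&sdev->list, &svm->devs);
 out:
    /* Unwind an svm that never got a device attached (lines 407-409). */
    if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
        ioasid_set_data(data->hpasid, NULL);  /* assumed unpublish on failure */
        kfree(svm);
    }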
420 struct intel_svm *svm; in intel_svm_unbind_gpasid() local
427 ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); in intel_svm_unbind_gpasid()
437 svm->pasid, false); in intel_svm_unbind_gpasid()
438 intel_svm_drain_prq(dev, svm->pasid); in intel_svm_unbind_gpasid()
441 if (list_empty(&svm->devs)) { in intel_svm_unbind_gpasid()
452 kfree(svm); in intel_svm_unbind_gpasid()
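The unbind path mirrors this: find the svm/sdev pair, tear down the device's PASID-table entry (faults are no longer ignored, hence the 'false' at line 437), drain in-flight page requests, and free the svm once its device list empties. Note what is missing: no ioasid_free(), because a guest-allocated PASID is not this driver's to release. A condensed sketch; the refcounting and RCU-deferred free are assumptions:

    /* Condensed sketch of intel_svm_unbind_gpasid() after the lookup. */
    ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
    if (ret)
        goto out;
    if (sdev && !--sdev->users) {             /* refcount handling assumed */
        list_del_rcu(&sdev->list);
        intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
        intel_svm_drain_prq(dev, svm->pasid); /* wait out pending faults */
        kfree_rcu(sdev, rcu);                 /* assumed RCU-deferred free */

        if (list_empty(&svm->devs)) {
            ioasid_set_data(pasid, NULL);     /* assumed: unpublish only */
            kfree(svm);                       /* line 452 */
        }
    }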
488 struct intel_svm *svm = NULL; in intel_svm_bind_mm() local
520 svm = t; in intel_svm_bind_mm()
521 if (svm->pasid >= pasid_max) { in intel_svm_bind_mm()
524 svm->pasid); in intel_svm_bind_mm()
530 for_each_svm_dev(sdev, svm, dev) { in intel_svm_bind_mm()
571 if (!svm) { in intel_svm_bind_mm()
572 svm = kzalloc(sizeof(*svm), GFP_KERNEL); in intel_svm_bind_mm()
573 if (!svm) { in intel_svm_bind_mm()
578 svm->iommu = iommu; in intel_svm_bind_mm()
584 svm->pasid = ioasid_alloc(NULL, PASID_MIN, in intel_svm_bind_mm()
585 pasid_max - 1, svm); in intel_svm_bind_mm()
586 if (svm->pasid == INVALID_IOASID) { in intel_svm_bind_mm()
587 kfree(svm); in intel_svm_bind_mm()
592 svm->notifier.ops = &intel_mmuops; in intel_svm_bind_mm()
593 svm->mm = mm; in intel_svm_bind_mm()
594 svm->flags = flags; in intel_svm_bind_mm()
595 INIT_LIST_HEAD_RCU(&svm->devs); in intel_svm_bind_mm()
596 INIT_LIST_HEAD(&svm->list); in intel_svm_bind_mm()
599 ret = mmu_notifier_register(&svm->notifier, mm); in intel_svm_bind_mm()
601 ioasid_free(svm->pasid); in intel_svm_bind_mm()
602 kfree(svm); in intel_svm_bind_mm()
611 svm->pasid, FLPT_DEFAULT_DID, in intel_svm_bind_mm()
618 mmu_notifier_unregister(&svm->notifier, mm); in intel_svm_bind_mm()
619 ioasid_free(svm->pasid); in intel_svm_bind_mm()
620 kfree(svm); in intel_svm_bind_mm()
625 list_add_tail(&svm->list, &global_svm_list); in intel_svm_bind_mm()
628 load_pasid(mm, svm->pasid); in intel_svm_bind_mm()
638 svm->pasid, FLPT_DEFAULT_DID, in intel_svm_bind_mm()
648 list_add_rcu(&sdev->list, &svm->devs); in intel_svm_bind_mm()
650 sdev->pasid = svm->pasid; in intel_svm_bind_mm()
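intel_svm_bind_mm() is the native bind path and carries the full lifecycle: reuse an svm already on global_svm_list whose mm matches (line 520), rejecting one whose PASID exceeds the caller's pasid_max (lines 521-524); otherwise allocate one, reserve a PASID with ioasid_alloc(), register the MMU notifier, program the first-level PASID-table entry, and only then make the binding visible via the global list and load_pasid(). The ordering matters: each later step unwinds all earlier ones on failure (lines 601-602, 618-620). A condensed sketch of the allocation arm; the errno values and the setup-call flags are assumptions:

    /* Condensed sketch of the "no existing svm" arm of intel_svm_bind_mm(). */
    svm = kzalloc(sizeof(*svm), GFP_KERNEL);
    if (!svm)
        return -ENOMEM;                        /* sdev unwind elided */

    svm->iommu = iommu;
    svm->pasid = ioasid_alloc(NULL, PASID_MIN, pasid_max - 1, svm);
    if (svm->pasid == INVALID_IOASID) {
        kfree(svm);
        return -ENOSPC;                        /* assumed errno */
    }

    svm->notifier.ops = &intel_mmuops;
    svm->mm = mm;
    svm->flags = flags;
    INIT_LIST_HEAD_RCU(&svm->devs);
    INIT_LIST_HEAD(&svm->list);

    ret = mmu_notifier_register(&svm->notifier, mm);
    if (ret) {                                 /* unwind: lines 601-602 */
        ioasid_free(svm->pasid);
        kfree(svm);
        return ret;
    }

    /* Program the first-level translation for this PASID (cf. line 611);
     * on failure unwind notifier + PASID + svm (lines 618-620). */
    ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd,
                                        svm->pasid, FLPT_DEFAULT_DID,
                                        0 /* flags assumed */);
    if (ret) {
        mmu_notifier_unregister(&svm->notifier, mm);
        ioasid_free(svm->pasid);
        kfree(svm);
        return ret;
    }

    list_add_tail(&svm->list, &global_svm_list);
    load_pasid(mm, svm->pasid);                /* mirror PASID into the mm */

After this point the per-device sdev is linked into svm->devs (line 648) and records the PASID it was bound with (line 650).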
664 struct intel_svm *svm; in intel_svm_unbind_mm() local
671 ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); in intel_svm_unbind_mm()
687 svm->pasid, false); in intel_svm_unbind_mm()
688 intel_svm_drain_prq(dev, svm->pasid); in intel_svm_unbind_mm()
691 if (list_empty(&svm->devs)) { in intel_svm_unbind_mm()
692 ioasid_free(svm->pasid); in intel_svm_unbind_mm()
693 if (svm->mm) { in intel_svm_unbind_mm()
694 mmu_notifier_unregister(&svm->notifier, svm->mm); in intel_svm_unbind_mm()
696 load_pasid(svm->mm, PASID_DISABLED); in intel_svm_unbind_mm()
698 list_del(&svm->list); in intel_svm_unbind_mm()
703 memset(svm, 0x6b, sizeof(*svm)); in intel_svm_unbind_mm()
704 kfree(svm); in intel_svm_unbind_mm()
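Unbind reverses bind in the same order, and the last-device case dismantles everything: free the PASID, unregister the notifier, clear the mm's PASID, and unlink from the global list. The memset at line 703 fills the structure with 0x6b, the slab POISON_FREE byte, so a page fault still referencing this svm after unbind fails loudly instead of corrupting memory silently. A sketch of that tail:

    if (list_empty(&svm->devs)) {
        ioasid_free(svm->pasid);
        if (svm->mm) {
            mmu_notifier_unregister(&svm->notifier, svm->mm);
            load_pasid(svm->mm, PASID_DISABLED);   /* clear the mm's PASID */
        }
        list_del(&svm->list);
        /* Poison: any stale user now sees 0x6b6b... rather than valid data. */
        memset(svm, 0x6b, sizeof(*svm));
        kfree(svm);
    }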
912 struct intel_svm *svm = NULL; in prq_event_thread() local
942 if (!svm || svm->pasid != req->pasid) { in prq_event_thread()
944 svm = ioasid_find(NULL, req->pasid, NULL); in prq_event_thread()
949 if (IS_ERR_OR_NULL(svm)) { in prq_event_thread()
962 list_for_each_entry_rcu(t, &svm->devs, list) { in prq_event_thread()
974 if (!svm->mm) in prq_event_thread()
985 if (svm->flags & SVM_FLAG_GUEST_MODE) { in prq_event_thread()
993 if (!mmget_not_zero(svm->mm)) in prq_event_thread()
996 mmap_read_lock(svm->mm); in prq_event_thread()
997 vma = find_extend_vma(svm->mm, address); in prq_event_thread()
1012 mmap_read_unlock(svm->mm); in prq_event_thread()
1013 mmput(svm->mm); in prq_event_thread()
1025 svm = NULL; in prq_event_thread()
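prq_event_thread() services page requests from the hardware queue. The matched lines show two key patterns: a one-entry cache of the last svm, refreshed by ioasid_find() only when the incoming req->pasid differs (lines 942-944) and apparently dropped on the error path so a bad lookup is not reused (line 1025); and the classic fault-service sequence of pinning the mm, taking the mmap lock for read, resolving the VMA, and releasing both on the way out (lines 993-1013). A condensed sketch of the per-request core; the handle_mm_fault() call and the labels are assumptions from context:

    /* Inside the request loop of prq_event_thread() (sketch). */
    if (!svm || svm->pasid != req->pasid) {
        rcu_read_lock();
        svm = ioasid_find(NULL, req->pasid, NULL);    /* line 944 */
        rcu_read_unlock();
        if (IS_ERR_OR_NULL(svm))
            goto bad_req;            /* hypothetical label */
    }

    if (!svm->mm)                    /* line 974: no mm to fault against */
        goto bad_req;

    /* Line 985: guest-mode faults are reported up to the vIOMMU instead
     * of being serviced here (path elided). */

    if (!mmget_not_zero(svm->mm))    /* mm may be exiting; pin it first */
        goto bad_req;

    mmap_read_lock(svm->mm);
    vma = find_extend_vma(svm->mm, address);
    if (vma && address >= vma->vm_start)
        /* assumed from context between lines 997 and 1012 */
        ret = handle_mm_fault(vma, address, flags, NULL);
    mmap_read_unlock(svm->mm);
    mmput(svm->mm);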
1129 struct intel_svm *svm = NULL; in intel_svm_page_response() local
1166 ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev); in intel_svm_page_response()
1176 if (svm->flags & SVM_FLAG_GUEST_MODE) { in intel_svm_page_response()
1185 if (mm != svm->mm) { in intel_svm_page_response()
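intel_svm_page_response() forwards a (typically guest-originated) page-request response back to the hardware. The two matched conditionals gate it: the PASID must be in guest mode, and the responder's mm must still be the mm that was bound, which guards against a response arriving after the process owning the PASID has been replaced. A minimal sketch of that validation; the get_task_mm()/mmput() pairing and errno values are assumptions:

    ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
    if (ret || !sdev)
        goto out;

    if (svm->flags & SVM_FLAG_GUEST_MODE) {        /* line 1176 */
        struct mm_struct *mm = get_task_mm(current);   /* assumed */
        if (!mm) {
            ret = -EINVAL;
            goto out;
        }
        if (mm != svm->mm) {        /* line 1185: stale or wrong responder */
            ret = -ENODEV;          /* assumed errno */
            mmput(mm);
            goto out;
        }
        mmput(mm);
    }
    /* ... build and submit the page-group response descriptor ... */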