Lines Matching refs:vm

98 	struct amdgpu_vm *vm;  member
117 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
122 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
125 if (vm->pasid) { in amdgpu_vm_set_pasid()
126 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
130 vm->pasid = 0; in amdgpu_vm_set_pasid()
134 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
139 vm->pasid = pasid; in amdgpu_vm_set_pasid()
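
The lines above show the rebind discipline in amdgpu_vm_set_pasid(): any existing pasid -> vm entry is erased from the adev->vm_manager.pasids xarray before a new one is stored, and vm->pasid is kept consistent with the map at every step. A minimal userspace sketch of the same control flow, with a fixed-size table standing in for the xarray (struct vm, pasid_map_* and vm_set_pasid() are hypothetical stand-ins, not the driver's API):

#include <stdint.h>

#define MAP_SLOTS 64

struct vm { uint32_t pasid; };

static struct { uint32_t pasid; struct vm *vm; } map[MAP_SLOTS];

static int pasid_map_store(uint32_t pasid, struct vm *vm)
{
	for (int i = 0; i < MAP_SLOTS; i++) {
		if (!map[i].vm) {
			map[i].pasid = pasid;
			map[i].vm = vm;
			return 0;
		}
	}
	return -1;	/* table full, mirrors xa_store_irq() failing */
}

static void pasid_map_erase(uint32_t pasid)
{
	for (int i = 0; i < MAP_SLOTS; i++)
		if (map[i].vm && map[i].pasid == pasid)
			map[i].vm = NULL;
}

/* Rebind @vm to @pasid; pasid == 0 just unbinds. */
static int vm_set_pasid(struct vm *vm, uint32_t pasid)
{
	if (vm->pasid == pasid)
		return 0;		/* already bound, nothing to do */

	if (vm->pasid) {
		pasid_map_erase(vm->pasid);
		vm->pasid = 0;		/* never leave a stale binding */
	}
	if (pasid) {
		if (pasid_map_store(pasid, vm))
			return -1;	/* vm stays unbound on failure */
		vm->pasid = pasid;
	}
	return 0;
}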
156 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
160 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
162 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
164 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
165 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
177 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
178 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
179 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
192 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
193 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
194 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
208 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
209 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
210 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
224 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
225 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
226 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
242 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
243 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
244 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
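
The helpers above (amdgpu_vm_bo_evicted() through amdgpu_vm_bo_done()) are all the same move: take vm->status_lock, list_move the BO's vm_status node onto the per-state list, unlock. A self-contained sketch of that state machine, using a pthread mutex and a toy circular list rather than the kernel's spinlock and <linux/list.h>:

#include <pthread.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct vm {
	pthread_mutex_t status_lock;
	struct list_head evicted, moved, idle, invalidated, relocated, done;
};

struct vm_bo_base {
	struct vm *vm;
	struct list_head vm_status;	/* linked into exactly one state list */
};

/* The whole state transition is one locked list move. */
static void vm_bo_set_state(struct vm_bo_base *bo, struct list_head *state)
{
	pthread_mutex_lock(&bo->vm->status_lock);
	list_del(&bo->vm_status);
	list_add_tail(&bo->vm_status, state);
	pthread_mutex_unlock(&bo->vm->status_lock);
}

One wrinkle visible in amdgpu_vm_bo_evicted() above: page tables go to the head of vm->evicted (list_move) while user BOs go to the tail (list_move_tail), so page tables get validated first.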
258 struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_bo_base_init() argument
260 base->vm = vm; in amdgpu_vm_bo_base_init()
270 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
273 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
275 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
303 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, in amdgpu_vm_get_pd_bo() argument
308 entry->tv.bo = &vm->root.bo->tbo; in amdgpu_vm_get_pd_bo()
325 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
328 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
345 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate_pt_bos() argument
354 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
355 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate_pt_bos()
356 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate_pt_bos()
359 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
376 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate_pt_bos()
379 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
381 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
383 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate_pt_bos()
384 vm->evicting = false; in amdgpu_vm_validate_pt_bos()
385 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate_pt_bos()
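
The validate loop above cannot hold vm->status_lock (a spinlock) across validation work that may sleep, so it peeks the first evicted entry, drops the lock, validates, and re-takes the lock before touching the list again. A standalone sketch of that drain pattern (all names hypothetical; the real code moves entries off the list via the state helpers shown earlier):

#include <pthread.h>

struct bo { struct bo *next; };

struct vm {
	pthread_mutex_t status_lock;
	struct bo *evicted;	/* list the loop drains */
	struct bo *idle;	/* where validated BOs end up */
};

static int validate_bo(struct bo *bo)
{
	(void)bo;
	return 0;		/* stands in for work that may sleep */
}

static int vm_validate_evicted(struct vm *vm)
{
	pthread_mutex_lock(&vm->status_lock);
	while (vm->evicted) {
		struct bo *bo = vm->evicted;
		int r;

		/* The lock must not be held across blocking work. */
		pthread_mutex_unlock(&vm->status_lock);
		r = validate_bo(bo);
		if (r)
			return r;
		pthread_mutex_lock(&vm->status_lock);

		/* Move the entry to another state list so the loop
		 * makes progress, as the kernel's helpers do. */
		if (vm->evicted == bo) {
			vm->evicted = bo->next;
			bo->next = vm->idle;
			vm->idle = bo;
		}
	}
	pthread_mutex_unlock(&vm->status_lock);
	return 0;
}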
400 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
405 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_ready()
406 ret = !vm->evicting; in amdgpu_vm_ready()
407 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_ready()
409 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
410 empty = list_empty(&vm->evicted); in amdgpu_vm_ready()
411 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
517 bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); in amdgpu_vm_flush()
622 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
628 if (base->vm != vm) in amdgpu_vm_bo_find()
676 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
684 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
685 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
686 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
696 params.vm = vm; in amdgpu_vm_update_pdes()
699 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_update_pdes()
712 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
717 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
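
amdgpu_vm_update_pdes() above drains vm->relocated differently: list_splice_init() steals the whole list onto a local head under the lock, and the walk then runs with no lock held at all. A minimal sketch of that splice-and-drain idiom with a singly-linked list (names hypothetical):

#include <pthread.h>

struct node { struct node *next; };

struct vm {
	pthread_mutex_t status_lock;
	struct node *relocated;
};

static int process(struct node *n)
{
	(void)n;
	return 0;
}

static int vm_update_relocated(struct vm *vm)
{
	struct node *local;

	/* list_splice_init(): steal the whole list in O(1), leaving
	 * the original head empty. */
	pthread_mutex_lock(&vm->status_lock);
	local = vm->relocated;
	vm->relocated = NULL;
	pthread_mutex_unlock(&vm->status_lock);

	/* Walk the private copy with no lock held at all. */
	for (struct node *n = local; n; n = n->next) {
		int r = process(n);

		if (r)
			return r;
	}
	return 0;
}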
743 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
770 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_update_range() argument
805 params.vm = vm; in amdgpu_vm_update_range()
818 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_update_range()
819 if (vm->evicting) { in amdgpu_vm_update_range()
824 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
827 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
828 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
832 r = vm->update_funcs->prepare(&params, resv, sync_mode); in amdgpu_vm_update_range()
888 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_update_range()
891 tlb_cb->vm = vm; in amdgpu_vm_update_range()
895 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_update_range()
896 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_update_range()
907 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_update_range()
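
Both amdgpu_vm_update_pdes() and the TLB-flush callback above bump vm->tlb_seq once an update is committed, so "is my TLB stale?" reduces to comparing sequence numbers. A small C11 sketch of that idea (the vm_tlb_* names are hypothetical, not the driver's API):

#include <stdatomic.h>
#include <stdint.h>

struct vm_tlb {
	atomic_uint_fast64_t tlb_seq;	/* bumped after each PT update */
};

/* Producer side, the analogue of atomic64_inc(&vm->tlb_seq) above:
 * called once a page table update is committed. */
static void vm_tlb_update_done(struct vm_tlb *vm)
{
	atomic_fetch_add_explicit(&vm->tlb_seq, 1, memory_order_release);
}

/* Consumer side: flush only if updates landed since our last flush,
 * and remember the sequence we flushed at. */
static int vm_tlb_needs_flush(struct vm_tlb *vm, uint64_t *flushed_seq)
{
	uint64_t seq = atomic_load_explicit(&vm->tlb_seq,
					    memory_order_acquire);

	if (seq == *flushed_seq)
		return 0;
	*flushed_seq = seq;
	return 1;
}

The release/acquire pairing means a consumer that observes the new sequence also observes the page-table writes that preceded the bump.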
912 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, in amdgpu_vm_get_memory() argument
917 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
918 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_vm_get_memory()
924 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_vm_get_memory()
930 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_vm_get_memory()
936 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_vm_get_memory()
942 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_vm_get_memory()
948 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_vm_get_memory()
954 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
972 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
985 resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_bo_update()
1020 vm->root.bo->tbo.base.resv)) in amdgpu_vm_bo_update()
1021 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1049 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, in amdgpu_vm_bo_update()
1062 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_update()
1181 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
1198 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
1200 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1227 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
1230 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_clear_freed()
1236 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1237 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1241 if (vm->pte_support_ats && in amdgpu_vm_clear_freed()
1245 r = amdgpu_vm_update_range(adev, vm, false, false, true, resv, in amdgpu_vm_clear_freed()
1249 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
1281 struct amdgpu_vm *vm) in amdgpu_vm_handle_moved() argument
1288 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1289 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1290 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1292 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1298 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1301 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1302 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1305 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1320 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1322 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1343 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
1352 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
1385 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
1390 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1395 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_insert_map()
1426 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
1444 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1512 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1550 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
1573 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1578 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1580 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
1600 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
1625 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
1656 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
1665 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
1671 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
1680 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
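
amdgpu_vm_bo_clear_mappings() above removes every mapping overlapping the cleared range from the interval tree and re-inserts the clipped "before" and "after" remainders. The underlying range arithmetic, as a standalone sketch with hypothetical types and inclusive bounds:

#include <stdbool.h>
#include <stdint.h>

struct range { uint64_t start, last; };	/* inclusive bounds */

/* Returns true if mapping @m overlaps [saddr, eaddr].  The surviving
 * pieces, if any, are written to @before/@after and flagged via
 * @has_before/@has_after; the caller re-inserts them, as
 * amdgpu_vm_bo_clear_mappings() does with the interval tree. */
static bool range_clip(const struct range *m,
		       uint64_t saddr, uint64_t eaddr,
		       struct range *before, bool *has_before,
		       struct range *after, bool *has_after)
{
	if (m->last < saddr || m->start > eaddr)
		return false;		/* no overlap, mapping survives */

	*has_before = m->start < saddr;
	if (*has_before) {
		before->start = m->start;
		before->last = saddr - 1;	/* piece left of the hole */
	}
	*has_after = m->last > eaddr;
	if (*has_after) {
		after->start = eaddr + 1;	/* piece right of the hole */
		after->last = m->last;
	}
	return true;
}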
1702 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
1705 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
1716 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
1723 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
1753 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del() local
1756 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
1760 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_del()
1773 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
1775 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
1779 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1782 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
1786 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1787 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_del()
1811 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
1819 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
1823 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
1824 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
1828 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
1829 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
1852 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
1854 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_invalidate()
1865 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_invalidate()
1993 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
1995 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2001 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2015 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_init() argument
2021 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2023 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2024 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2025 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2026 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2027 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2028 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2029 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2030 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2031 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2032 INIT_LIST_HEAD(&vm->pt_freed); in amdgpu_vm_init()
2033 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); in amdgpu_vm_init()
2036 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init()
2042 r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init()
2048 vm->pte_support_ats = false; in amdgpu_vm_init()
2049 vm->is_compute_context = false; in amdgpu_vm_init()
2051 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2055 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2056 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2060 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2061 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2063 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2064 vm->last_update = NULL; in amdgpu_vm_init()
2065 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2066 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2068 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2069 vm->evicting = false; in amdgpu_vm_init()
2071 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2084 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2086 r = amdgpu_vm_pt_clear(adev, vm, root, false); in amdgpu_vm_init()
2090 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2092 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2097 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2102 vm->root.bo = NULL; in amdgpu_vm_init()
2105 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2106 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2107 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_init()
2110 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init()
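
The tail of amdgpu_vm_init() (source lines 2092-2110 above) is the usual kernel goto-unwind: each failure label releases only what was acquired before the jump, in reverse acquisition order. A compact illustration of the shape, with acquire_a/b/c and release_a/b as hypothetical stubs:

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int init_with_unwind(void)
{
	int r;

	r = acquire_a();
	if (r)
		return r;	/* nothing to undo yet */
	r = acquire_b();
	if (r)
		goto error_free_a;
	r = acquire_c();
	if (r)
		goto error_free_b;
	return 0;

	/* Labels release in reverse acquisition order; falling through
	 * from one label to the next is intentional. */
error_free_b:
	release_b();
error_free_a:
	release_a();
	return r;
}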
2134 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
2139 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2144 if (!amdgpu_vm_pt_is_root_clean(adev, vm)) { in amdgpu_vm_make_compute()
2152 if (pte_support_ats != vm->pte_support_ats) { in amdgpu_vm_make_compute()
2153 vm->pte_support_ats = pte_support_ats; in amdgpu_vm_make_compute()
2154 r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo), in amdgpu_vm_make_compute()
2161 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2164 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2165 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2169 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2171 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2176 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2178 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2184 r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo)); in amdgpu_vm_make_compute()
2188 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2189 vm->last_update = NULL; in amdgpu_vm_make_compute()
2190 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2193 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); in amdgpu_vm_make_compute()
2198 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
2210 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_release_compute() argument
2212 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_release_compute()
2213 vm->is_compute_context = false; in amdgpu_vm_release_compute()
2225 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
2233 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
2235 flush_work(&vm->pt_free_work); in amdgpu_vm_fini()
2237 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2239 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_fini()
2240 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2241 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2242 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2244 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2245 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2246 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2248 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2250 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
2255 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
2258 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_fini()
2261 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2263 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini()
2264 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini()
2266 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2270 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2278 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2280 amdgpu_vmid_free_reserved(adev, vm, i); in amdgpu_vm_fini()
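
One subtle step in amdgpu_vm_fini() above: after waiting for last_tlb_flush, the code takes and immediately drops the fence's own lock (the irqsave/irqrestore pair at source lines 2244-2245) before the final dma_fence_put(). Fence callbacks run under that lock, so an empty critical section guarantees any in-flight callback has finished before the fence is freed. A userspace analogue of the pattern (hypothetical types):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct fence {
	pthread_mutex_t lock;	/* held while callbacks run */
	atomic_bool signaled;
};

/* Signalling side: mark the fence signaled, then run callbacks under
 * the fence lock, as dma_fence_signal() does. */
static void fence_signal(struct fence *f, void (*cb)(void))
{
	atomic_store(&f->signaled, true);
	pthread_mutex_lock(&f->lock);
	if (cb)
		cb();
	pthread_mutex_unlock(&f->lock);
}

/* Teardown side, mirroring the fini sequence above: a waiter can see
 * signaled == true while a callback is still running, so take and
 * drop the lock once before freeing the fence. */
static void fence_flush_before_free(struct fence *f)
{
	while (!atomic_load(&f->signaled))
		;	/* stand-in for dma_fence_wait() */

	pthread_mutex_lock(&f->lock);	/* wait out in-flight callbacks */
	pthread_mutex_unlock(&f->lock);
	/* ...now safe to drop the last reference. */
}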
2369 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, in amdgpu_vm_ioctl()
2381 r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); in amdgpu_vm_ioctl()
2385 r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); in amdgpu_vm_ioctl()
2389 amdgpu_bo_unreserve(fpriv->vm.root.bo); in amdgpu_vm_ioctl()
2390 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); in amdgpu_vm_ioctl()
2409 struct amdgpu_vm *vm; in amdgpu_vm_get_task_info() local
2414 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_task_info()
2415 if (vm) in amdgpu_vm_get_task_info()
2416 *task_info = vm->task_info; in amdgpu_vm_get_task_info()
2426 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
2428 if (vm->task_info.pid) in amdgpu_vm_set_task_info()
2431 vm->task_info.pid = current->pid; in amdgpu_vm_set_task_info()
2432 get_task_comm(vm->task_info.task_name, current); in amdgpu_vm_set_task_info()
2437 vm->task_info.tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2438 get_task_comm(vm->task_info.process_name, current->group_leader); in amdgpu_vm_set_task_info()
2458 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
2462 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2463 if (vm) { in amdgpu_vm_handle_fault()
2464 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2465 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2488 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2489 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2490 vm = NULL; in amdgpu_vm_handle_fault()
2492 if (!vm) in amdgpu_vm_handle_fault()
2521 r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr, in amdgpu_vm_handle_fault()
2526 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
2548 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
2565 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2567 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2576 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2585 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2594 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2603 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2612 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2617 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()