Lines matching refs: vm

121 	struct amdgpu_vm *vm;  member
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
145 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
148 if (vm->pasid) { in amdgpu_vm_set_pasid()
149 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
153 vm->pasid = 0; in amdgpu_vm_set_pasid()
157 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
162 vm->pasid = pasid; in amdgpu_vm_set_pasid()
179 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
183 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
185 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
187 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
188 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
200 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
201 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
202 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
215 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
216 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
217 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
231 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
232 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
233 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
247 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
248 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
249 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
265 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
266 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
267 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
277 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) in amdgpu_vm_bo_reset_state_machine() argument
281 spin_lock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
282 list_splice_init(&vm->done, &vm->invalidated); in amdgpu_vm_bo_reset_state_machine()
283 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) in amdgpu_vm_bo_reset_state_machine()
285 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { in amdgpu_vm_bo_reset_state_machine()
289 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_reset_state_machine()
291 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_reset_state_machine()
293 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
307 struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_bo_base_init() argument
309 base->vm = vm; in amdgpu_vm_bo_base_init()
319 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
322 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
324 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
351 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, in amdgpu_vm_lock_pd() argument
355 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, in amdgpu_vm_lock_pd()
369 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
372 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
378 struct amdgpu_vm *vm) in amdgpu_vm_init_entities() argument
382 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
388 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
393 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init_entities()
398 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm) in amdgpu_vm_fini_entities() argument
400 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini_entities()
401 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini_entities()
413 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_generation() argument
417 if (!vm) in amdgpu_vm_generation()
420 result += vm->generation; in amdgpu_vm_generation()
422 if (drm_sched_entity_error(&vm->delayed)) in amdgpu_vm_generation()
441 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate_pt_bos() argument
450 if (drm_sched_entity_error(&vm->delayed)) { in amdgpu_vm_validate_pt_bos()
451 ++vm->generation; in amdgpu_vm_validate_pt_bos()
452 amdgpu_vm_bo_reset_state_machine(vm); in amdgpu_vm_validate_pt_bos()
453 amdgpu_vm_fini_entities(vm); in amdgpu_vm_validate_pt_bos()
454 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_validate_pt_bos()
459 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
460 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate_pt_bos()
461 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate_pt_bos()
464 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
481 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate_pt_bos()
484 spin_lock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
486 spin_unlock(&vm->status_lock); in amdgpu_vm_validate_pt_bos()
488 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate_pt_bos()
489 vm->evicting = false; in amdgpu_vm_validate_pt_bos()
490 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate_pt_bos()
505 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
510 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_ready()
511 ret = !vm->evicting; in amdgpu_vm_ready()
512 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_ready()
514 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
515 empty = list_empty(&vm->evicted); in amdgpu_vm_ready()
516 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
714 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
720 if (base->vm != vm) in amdgpu_vm_bo_find()
768 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
776 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
777 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
778 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
788 params.vm = vm; in amdgpu_vm_update_pdes()
791 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_update_pdes()
804 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
809 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
835 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
862 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_update_range() argument
897 params.vm = vm; in amdgpu_vm_update_range()
910 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_update_range()
911 if (vm->evicting) { in amdgpu_vm_update_range()
916 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
919 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
920 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
924 r = vm->update_funcs->prepare(&params, resv, sync_mode); in amdgpu_vm_update_range()
982 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_update_range()
985 tlb_cb->vm = vm; in amdgpu_vm_update_range()
989 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_update_range()
990 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_update_range()
1001 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_update_range()
1009 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_get_memory() local
1019 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_get_memory()
1024 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_get_memory()
1028 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, in amdgpu_vm_get_memory() argument
1033 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
1034 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) in amdgpu_vm_get_memory()
1037 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) in amdgpu_vm_get_memory()
1040 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) in amdgpu_vm_get_memory()
1043 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) in amdgpu_vm_get_memory()
1046 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) in amdgpu_vm_get_memory()
1049 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) in amdgpu_vm_get_memory()
1051 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
1070 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1083 resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_bo_update()
1119 vm->root.bo->tbo.base.resv)) in amdgpu_vm_bo_update()
1120 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1148 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, in amdgpu_vm_bo_update()
1161 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_update()
1280 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
1297 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
1299 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1326 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
1329 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_clear_freed()
1335 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1336 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1340 if (vm->pte_support_ats && in amdgpu_vm_clear_freed()
1344 r = amdgpu_vm_update_range(adev, vm, false, false, true, resv, in amdgpu_vm_clear_freed()
1348 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
1380 struct amdgpu_vm *vm) in amdgpu_vm_handle_moved() argument
1387 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1388 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1389 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1391 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1397 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1400 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1401 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1404 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1419 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1421 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1442 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
1451 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
1485 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
1490 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1495 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_insert_map()
1526 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
1544 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1612 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1650 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
1673 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1678 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1680 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
1700 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
1725 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
1756 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
1765 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
1773 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
1777 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_clear_mappings()
1788 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
1792 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_clear_mappings()
1814 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
1817 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
1828 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
1835 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
1865 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del() local
1868 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
1872 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_del()
1885 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
1887 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
1891 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1894 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
1898 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
1899 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_del()
1923 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
1931 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
1935 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
1936 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
1940 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
1941 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
1964 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
1966 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_invalidate()
1977 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_invalidate()
2105 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2107 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2113 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2128 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id) in amdgpu_vm_init() argument
2134 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2136 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2137 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2138 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2139 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2140 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2141 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2142 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2143 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2144 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2145 INIT_LIST_HEAD(&vm->pt_freed); in amdgpu_vm_init()
2146 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); in amdgpu_vm_init()
2148 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_init()
2152 vm->pte_support_ats = false; in amdgpu_vm_init()
2153 vm->is_compute_context = false; in amdgpu_vm_init()
2155 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2159 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2160 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2164 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2165 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2167 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2169 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_init()
2170 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2171 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2172 vm->generation = 0; in amdgpu_vm_init()
2174 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2175 vm->evicting = false; in amdgpu_vm_init()
2177 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2190 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2192 r = amdgpu_vm_pt_clear(adev, vm, root, false); in amdgpu_vm_init()
2196 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2198 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2203 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2208 vm->root.bo = NULL; in amdgpu_vm_init()
2211 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2212 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2213 amdgpu_vm_fini_entities(vm); in amdgpu_vm_init()
2237 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
2242 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2249 if (pte_support_ats != vm->pte_support_ats) { in amdgpu_vm_make_compute()
2251 if (!amdgpu_vm_pt_is_root_clean(adev, vm)) { in amdgpu_vm_make_compute()
2256 vm->pte_support_ats = pte_support_ats; in amdgpu_vm_make_compute()
2257 r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo), in amdgpu_vm_make_compute()
2264 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2267 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2268 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2272 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2274 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2279 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2280 r = amdgpu_vm_pt_map_tables(adev, vm); in amdgpu_vm_make_compute()
2285 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2288 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2289 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_make_compute()
2290 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2293 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); in amdgpu_vm_make_compute()
2298 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
2310 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_release_compute() argument
2312 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_release_compute()
2313 vm->is_compute_context = false; in amdgpu_vm_release_compute()
2325 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
2333 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
2335 flush_work(&vm->pt_free_work); in amdgpu_vm_fini()
2337 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2339 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_fini()
2340 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2341 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2342 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2344 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2345 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2346 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2348 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2350 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
2355 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
2358 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_fini()
2361 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2363 amdgpu_vm_fini_entities(vm); in amdgpu_vm_fini()
2365 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2369 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2377 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2380 if (vm->reserved_vmid[i]) { in amdgpu_vm_fini()
2382 vm->reserved_vmid[i] = false; in amdgpu_vm_fini()
2476 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2478 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; in amdgpu_vm_ioctl()
2483 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2485 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; in amdgpu_vm_ioctl()
2505 struct amdgpu_vm *vm; in amdgpu_vm_get_task_info() local
2510 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_task_info()
2511 if (vm) in amdgpu_vm_get_task_info()
2512 *task_info = vm->task_info; in amdgpu_vm_get_task_info()
2522 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
2524 if (vm->task_info.pid) in amdgpu_vm_set_task_info()
2527 vm->task_info.pid = current->pid; in amdgpu_vm_set_task_info()
2528 get_task_comm(vm->task_info.task_name, current); in amdgpu_vm_set_task_info()
2533 vm->task_info.tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2534 get_task_comm(vm->task_info.process_name, current->group_leader); in amdgpu_vm_set_task_info()
2558 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
2562 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2563 if (vm) { in amdgpu_vm_handle_fault()
2564 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2565 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2588 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2589 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2590 vm = NULL; in amdgpu_vm_handle_fault()
2592 if (!vm) in amdgpu_vm_handle_fault()
2621 r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr, in amdgpu_vm_handle_fault()
2626 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
2648 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
2665 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2667 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2676 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2685 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2694 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2703 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2712 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2717 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
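
Most of the references above follow one recurring pattern: each amdgpu_vm_bo_base sits on exactly one of the per-VM status lists (evicted, moved, idle, invalidated, relocated, done), and every transition is a list_move()/list_move_tail() performed while holding vm->status_lock, with the lock dropped again before any real work is done on the entry. The following is a minimal, self-contained user-space sketch of that pattern only; it is not amdgpu code, and the demo_* names plus the pthread mutex standing in for the kernel spinlock are assumptions made purely for illustration.

/*
 * Illustrative sketch (not amdgpu code): a BO base object lives on exactly
 * one per-VM status list at a time, and transitions happen under a lock
 * that protects list membership only.
 */
#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* Move an entry from whatever list it is on to the tail of another. */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

struct demo_vm {
	pthread_mutex_t status_lock;	/* stands in for vm->status_lock */
	struct list_head moved, idle, invalidated;
};

struct demo_vm_bo {
	struct demo_vm *vm;
	struct list_head vm_status;	/* linked into exactly one vm list */
};

/* Mirrors the lock/list_move/unlock pattern of amdgpu_vm_bo_moved() etc. */
static void demo_vm_bo_move(struct demo_vm_bo *bo, struct list_head *target)
{
	pthread_mutex_lock(&bo->vm->status_lock);
	list_move_tail(&bo->vm_status, target);
	pthread_mutex_unlock(&bo->vm->status_lock);
}

int main(void)
{
	struct demo_vm vm;
	struct demo_vm_bo bo = { .vm = &vm };

	pthread_mutex_init(&vm.status_lock, NULL);
	list_init(&vm.moved);
	list_init(&vm.idle);
	list_init(&vm.invalidated);
	list_init(&bo.vm_status);
	list_add_tail(&bo.vm_status, &vm.idle);	/* start out "idle" */

	demo_vm_bo_move(&bo, &vm.invalidated);	/* backing memory moved */
	demo_vm_bo_move(&bo, &vm.moved);	/* picked up for revalidation */
	demo_vm_bo_move(&bo, &vm.idle);		/* page tables updated again */

	printf("bo is back on the idle list: %d\n",
	       vm.idle.next == &bo.vm_status);
	return 0;
}

Build with something like cc -pthread demo.c. The design point the sketch tries to capture is that the lock guards list membership only, never the BO itself, which is why the loops in amdgpu_vm_validate_pt_bos() and amdgpu_vm_handle_moved() above drop status_lock after popping the next entry and re-take it on each iteration.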