Lines matching full:vm

50  * for the entire GPU, there are multiple VM page tables active
51 * at any given time. The VM page tables can contain a mix
55 * Each VM has an ID associated with it and there is a page table
92 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
95 * @vm: amdgpu_vm pointer
96 * @pasid: the pasid the VM is using on this GPU
98 * Set the pasid this VM is using on this GPU; this can also be used to remove the
102 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
107 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
110 if (vm->pasid) { in amdgpu_vm_set_pasid()
111 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
115 vm->pasid = 0; in amdgpu_vm_set_pasid()
119 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
124 vm->pasid = pasid; in amdgpu_vm_set_pasid()
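
The pasid lookup above lives in an xarray on the device (adev->vm_manager.pasids), stored and erased with the irq-safe variants because the table is also consulted from fault handling. A minimal sketch of the same pattern, with the driver structures reduced to what the example needs (my_vm and the standalone pasids xarray are illustrative names):

#include <linux/types.h>
#include <linux/xarray.h>

struct my_vm { u32 pasid; };

static DEFINE_XARRAY_FLAGS(pasids, XA_FLAGS_LOCK_IRQ);

static int my_set_pasid(struct my_vm *vm, u32 pasid)
{
	int r;

	if (vm->pasid == pasid)
		return 0;

	if (vm->pasid) {
		/* xa_erase_irq() returns the erased entry; xa_err()
		 * turns a potential error entry into a -errno. */
		r = xa_err(xa_erase_irq(&pasids, vm->pasid));
		if (r < 0)
			return r;
		vm->pasid = 0;
	}

	if (pasid) {
		r = xa_err(xa_store_irq(&pasids, pasid, vm, GFP_KERNEL));
		if (r < 0)
			return r;
		vm->pasid = pasid;
	}

	return 0;
}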
132 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
136 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_lock() argument
138 mutex_lock(&vm->eviction_lock); in amdgpu_vm_eviction_lock()
139 vm->saved_flags = memalloc_noreclaim_save(); in amdgpu_vm_eviction_lock()
142 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_trylock() argument
144 if (mutex_trylock(&vm->eviction_lock)) { in amdgpu_vm_eviction_trylock()
145 vm->saved_flags = memalloc_noreclaim_save(); in amdgpu_vm_eviction_trylock()
151 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) in amdgpu_vm_eviction_unlock() argument
153 memalloc_noreclaim_restore(vm->saved_flags); in amdgpu_vm_eviction_unlock()
154 mutex_unlock(&vm->eviction_lock); in amdgpu_vm_eviction_unlock()
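
These wrappers exist because, as the comment above notes, the eviction lock can be taken inside MMU notifiers: an allocation made while holding the lock must not enter reclaim, since reclaim can fire an MMU notifier that would try to take the same lock again. memalloc_noreclaim_save() marks the task so its allocations skip reclaim until the saved flags are restored. The guard pattern in isolation (struct guarded is an illustrative reduction of the VM fields involved):

#include <linux/mutex.h>
#include <linux/sched/mm.h>

struct guarded {
	struct mutex lock;
	unsigned int saved_flags;
};

static void guarded_lock(struct guarded *g)
{
	mutex_lock(&g->lock);
	/* From here until unlock, allocations by this task will not
	 * recurse into reclaim (and thus not into MMU notifiers). */
	g->saved_flags = memalloc_noreclaim_save();
}

static void guarded_unlock(struct guarded *g)
{
	memalloc_noreclaim_restore(g->saved_flags);
	mutex_unlock(&g->lock);
}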
264 * State for PDs/PTs and per VM BOs which are not at the location they should
269 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
274 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
276 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
283 * State for per VM BOs which are moved, but that change is not yet reflected
288 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
296 * State for PDs/PTs and per VM BOs which have gone through the state machine
301 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
315 spin_lock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_invalidated()
316 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
317 spin_unlock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_invalidated()
331 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
346 spin_lock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_done()
347 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
348 spin_unlock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_done()
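
Taken together, these helpers form a small state machine: every PD/PT and per-VM BO sits on exactly one per-VM list (evicted, relocated, moved, idle, invalidated, done), and a state change is nothing more than a list_move() of its vm_status node. Only the invalidated and done lists take a spinlock, because they can be updated without the VM reservation held. A reduced sketch of the mechanism (vm_lists and tracked_bo are illustrative names, not driver types):

#include <linux/list.h>
#include <linux/spinlock.h>

struct vm_lists {
	struct list_head evicted, moved, idle, invalidated, done;
	spinlock_t invalidated_lock;	/* guards invalidated + done */
};

struct tracked_bo {
	struct list_head vm_status;	/* list membership == current state */
};

static void bo_set_invalidated(struct vm_lists *vm, struct tracked_bo *bo)
{
	spin_lock(&vm->invalidated_lock);
	list_move(&bo->vm_status, &vm->invalidated);
	spin_unlock(&vm->invalidated_lock);
}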
352 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
354 * @base: base structure for tracking BO usage in a VM
355 * @vm: vm to which bo is to be added
362 struct amdgpu_vm *vm, in amdgpu_vm_bo_base_init() argument
365 base->vm = vm; in amdgpu_vm_bo_base_init()
375 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
378 vm->bulk_moveable = false; in amdgpu_vm_bo_base_init()
389 * we checked all the prerequisites, but it looks like this per vm bo in amdgpu_vm_bo_base_init()
391 * is validated on next vm use to avoid fault. in amdgpu_vm_bo_base_init()
428 * @vm: amdgpu_vm structure
435 struct amdgpu_vm *vm, uint64_t start, in amdgpu_vm_pt_start() argument
440 cursor->entry = &vm->root; in amdgpu_vm_pt_start()
554 * @vm: amdgpu_vm structure
561 struct amdgpu_vm *vm, in amdgpu_vm_pt_first_dfs() argument
568 amdgpu_vm_pt_start(adev, vm, 0, cursor); in amdgpu_vm_pt_first_dfs()
612 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ argument
613 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
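
The _safe suffix follows the usual kernel iterator convention: the next entry is fetched before the loop body runs (via amdgpu_vm_pt_next_dfs(), which this listing does not show), so the body is allowed to free the current entry, as amdgpu_vm_free_pts() below does. Typical use, with do_something() as a placeholder:

struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;

/* start == NULL walks the whole page-table hierarchy from the root */
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
	do_something(entry);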
619 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
621 * @vm: vm providing the BOs
628 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, in amdgpu_vm_get_pd_bo() argument
633 entry->tv.bo = &vm->root.bo->tbo; in amdgpu_vm_get_pd_bo()
634 /* Two for VM updates, one for TTM and one for the CS job */ in amdgpu_vm_get_pd_bo()
663 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_del_from_lru_notify() local
665 if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_del_from_lru_notify()
666 vm->bulk_moveable = false; in amdgpu_vm_del_from_lru_notify()
674 * @vm: vm providing the BOs
680 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
684 if (vm->bulk_moveable) { in amdgpu_vm_move_to_lru_tail()
686 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
691 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); in amdgpu_vm_move_to_lru_tail()
694 list_for_each_entry(bo_base, &vm->idle, vm_status) { in amdgpu_vm_move_to_lru_tail()
702 &vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
706 &vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
710 vm->bulk_moveable = true; in amdgpu_vm_move_to_lru_tail()
717 * @vm: vm providing the BOs
726 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate_pt_bos() argument
733 vm->bulk_moveable &= list_empty(&vm->evicted); in amdgpu_vm_validate_pt_bos()
735 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { in amdgpu_vm_validate_pt_bos()
751 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate_pt_bos()
756 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate_pt_bos()
757 vm->evicting = false; in amdgpu_vm_validate_pt_bos()
758 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate_pt_bos()
764 * amdgpu_vm_ready - check whether VM is ready for updates
766 * @vm: VM to check
768 * Check if all VM PDs/PTs are ready for updates
773 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
775 return list_empty(&vm->evicted); in amdgpu_vm_ready()
782 * @vm: VM to clear BO from
792 struct amdgpu_vm *vm, in amdgpu_vm_clear_bo() argument
815 if (!vm->pte_support_ats) { in amdgpu_vm_clear_bo()
828 if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) { in amdgpu_vm_clear_bo()
848 r = vm->update_funcs->map_table(vmbo); in amdgpu_vm_clear_bo()
854 params.vm = vm; in amdgpu_vm_clear_bo()
857 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_clear_bo()
872 r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries, in amdgpu_vm_clear_bo()
895 r = vm->update_funcs->update(&params, vmbo, addr, 0, entries, in amdgpu_vm_clear_bo()
901 return vm->update_funcs->commit(&params, NULL); in amdgpu_vm_clear_bo()
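
amdgpu_vm_clear_bo() shows the calling convention of the pluggable update backends: map_table() makes the page-table BO accessible to the backend, prepare() syncs against fences and reserves command space, update() writes a run of entries, and commit() submits the work and can hand back a fence. A hypothetical reduced form of such a vtable; the field names and signatures below are illustrative, not the driver's exact amdgpu_vm_update_funcs:

#include <linux/types.h>

struct dma_resv;
struct dma_fence;

struct pt_update_backend {
	/* make the page-table BO accessible for this backend */
	int (*map_table)(void *bo);
	/* sync against fences / allocate ring space before updates */
	int (*prepare)(void *ctx, struct dma_resv *resv);
	/* write 'count' entries at offset 'pe', advancing the mapped
	 * address by 'incr' per entry and applying 'flags' */
	int (*update)(void *ctx, void *bo, u64 pe, u64 addr,
		      unsigned int count, u32 incr, u64 flags);
	/* submit the accumulated work, optionally returning a fence */
	int (*commit)(void *ctx, struct dma_fence **fence);
};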
908 * @vm: requesting vm
914 struct amdgpu_vm *vm, in amdgpu_vm_pt_create() argument
940 if (vm->use_cpu_for_update) in amdgpu_vm_pt_create()
945 if (vm->root.bo) in amdgpu_vm_pt_create()
946 bp.resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_pt_create()
953 if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) { in amdgpu_vm_pt_create()
990 * @vm: VM to allocate page tables for
1001 struct amdgpu_vm *vm, in amdgpu_vm_alloc_pts() argument
1013 r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); in amdgpu_vm_alloc_pts()
1022 amdgpu_vm_bo_base_init(entry, vm, pt_bo); in amdgpu_vm_alloc_pts()
1023 r = amdgpu_vm_clear_bo(adev, vm, pt, immediate); in amdgpu_vm_alloc_pts()
1057 * @vm: amdgpu vm structure
1063 struct amdgpu_vm *vm, in amdgpu_vm_free_pts() argument
1069 vm->bulk_moveable = false; in amdgpu_vm_free_pts()
1071 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) in amdgpu_vm_free_pts()
1079 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
1094 /* Compute has a VM bug for GFX version < 7. in amdgpu_vm_check_compute_bug()
1095 Compute has a VM bug for GFX 8 MEC firmware version < 673. */ in amdgpu_vm_check_compute_bug()
1150 * amdgpu_vm_flush - hardware flush the vm
1156 * Emit a VM flush when it is necessary.
1179 bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); in amdgpu_vm_flush()
1269 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1271 * @vm: requested vm
1274 * Find @bo inside the requested vm.
1275 * Search inside the @bo's vm list for the requested vm
1283 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
1289 if (base->vm != vm) in amdgpu_vm_bo_find()
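
A BO chains its per-VM bases in a singly linked list (bo->vm_bo, linked through base->next), so the find helper is a plain walk of that chain until the base belonging to the requested vm shows up. A sketch consistent with the fragment above (my_bo_find is an illustrative name; the container_of() step assumes the base is embedded in amdgpu_bo_va, which matches the driver):

static struct amdgpu_bo_va *my_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;
		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}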
1328 * @vm: requested vm
1334 struct amdgpu_vm *vm, in amdgpu_vm_update_pde() argument
1348 return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt, in amdgpu_vm_update_pde()
1356 * @vm: related vm
1361 struct amdgpu_vm *vm) in amdgpu_vm_invalidate_pds() argument
1366 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) in amdgpu_vm_invalidate_pds()
1375 * @vm: requested vm
1384 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
1389 if (list_empty(&vm->relocated)) in amdgpu_vm_update_pdes()
1394 params.vm = vm; in amdgpu_vm_update_pdes()
1397 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); in amdgpu_vm_update_pdes()
1401 while (!list_empty(&vm->relocated)) { in amdgpu_vm_update_pdes()
1404 entry = list_first_entry(&vm->relocated, in amdgpu_vm_update_pdes()
1409 r = amdgpu_vm_update_pde(&params, vm, entry); in amdgpu_vm_update_pdes()
1414 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
1420 amdgpu_vm_invalidate_pds(adev, vm); in amdgpu_vm_update_pdes()
1448 params->vm->update_funcs->update(params, pt, pe, addr, count, incr, in amdgpu_vm_update_flags()
1541 amdgpu_vm_pt_start(adev, params->vm, start, &cursor); in amdgpu_vm_update_ptes()
1551 r = amdgpu_vm_alloc_pts(params->adev, params->vm, in amdgpu_vm_update_ptes()
1614 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_update_ptes() local
1626 vm->task_info.pid, in amdgpu_vm_update_ptes()
1627 vm->immediate.fence_context); in amdgpu_vm_update_ptes()
1656 amdgpu_vm_free_pts(adev, params->vm, &cursor); in amdgpu_vm_update_ptes()
1671 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1673 * @adev: amdgpu_device pointer of the VM
1675 * @vm: requested vm
1695 struct amdgpu_vm *vm, bool immediate, in amdgpu_vm_bo_update_mapping() argument
1714 params.vm = vm; in amdgpu_vm_bo_update_mapping()
1719 /* Implicitly sync to command submissions in the same VM before in amdgpu_vm_bo_update_mapping()
1727 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_bo_update_mapping()
1728 if (vm->evicting) { in amdgpu_vm_bo_update_mapping()
1733 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_bo_update_mapping()
1736 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_bo_update_mapping()
1737 swap(vm->last_unlocked, tmp); in amdgpu_vm_bo_update_mapping()
1741 r = vm->update_funcs->prepare(&params, resv, sync_mode); in amdgpu_vm_bo_update_mapping()
1798 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_bo_update_mapping()
1804 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_bo_update_mapping()
1809 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, in amdgpu_vm_get_memory() argument
1814 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_vm_get_memory()
1820 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_vm_get_memory()
1826 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_vm_get_memory()
1832 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_vm_get_memory()
1838 spin_lock(&vm->invalidated_lock); in amdgpu_vm_get_memory()
1839 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_vm_get_memory()
1845 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_vm_get_memory()
1851 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_get_memory()
1854 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1857 * @bo_va: requested BO and VM object
1870 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1882 resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_bo_update()
1913 vm->root.bo->tbo.base.resv)) in amdgpu_vm_bo_update()
1914 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1942 r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, in amdgpu_vm_bo_update()
1955 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_update()
2066 * @vm: requested vm
2073 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
2086 * @vm: requested vm
2088 * Register a cleanup callback to disable PRT support after VM dies.
2090 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
2092 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
2123 * @vm: requested vm
2135 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
2138 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_clear_freed()
2144 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
2145 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
2149 if (vm->pte_support_ats && in amdgpu_vm_clear_freed()
2153 r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, in amdgpu_vm_clear_freed()
2157 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
2179 * @vm: requested vm
2189 struct amdgpu_vm *vm) in amdgpu_vm_handle_moved() argument
2196 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_vm_handle_moved()
2197 /* Per VM BOs never need to be cleared in the page tables */ in amdgpu_vm_handle_moved()
2203 spin_lock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2204 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
2205 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
2208 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2223 spin_lock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2225 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
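
The invalidated list is drained with a take-first/unlock/process/relock loop: the spinlock only protects list membership, while the per-BO update work can sleep. A self-contained version of the pattern, reusing the reduced vm_lists/tracked_bo types from the state-machine sketch above (the driver relies on the update step itself to move the entry to another list; here the move is done eagerly so the sketch visibly terminates, and process() is a placeholder):

static void drain_invalidated(struct vm_lists *vm)
{
	struct tracked_bo *bo;

	spin_lock(&vm->invalidated_lock);
	while (!list_empty(&vm->invalidated)) {
		bo = list_first_entry(&vm->invalidated,
				      struct tracked_bo, vm_status);
		list_move(&bo->vm_status, &vm->done);
		spin_unlock(&vm->invalidated_lock);

		process(bo);	/* may sleep */

		spin_lock(&vm->invalidated_lock);
	}
	spin_unlock(&vm->invalidated_lock);
}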
2231 * amdgpu_vm_bo_add - add a bo to a specific vm
2234 * @vm: requested vm
2237 * Add @bo into the requested vm.
2238 * Add @bo to the list of bos associated with the vm
2246 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
2255 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
2287 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
2292 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
2297 if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && in amdgpu_vm_bo_insert_map()
2299 list_move(&bo_va->base.vm_status, &vm->moved); in amdgpu_vm_bo_insert_map()
2305 * amdgpu_vm_bo_map - map bo inside a vm
2314 * Add a mapping of the BO at the specified addr into the VM.
2328 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
2346 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
2370 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2379 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2414 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
2434 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2440 * Remove a mapping of the BO at the specified addr from the VM.
2452 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
2475 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
2480 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
2482 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
2492 * @vm: VM structure to use
2502 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
2527 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
2558 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
2567 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
2573 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
2582 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
2595 * @vm: the requested VM
2604 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
2607 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
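
The mappings of a VM are kept in an interval tree rooted at vm->va, so a point lookup is simply an interval query with start == last. One caveat for callers: the tree is keyed in GPU pages, not bytes; the division below follows the convention of the driver's existing callers and should be treated as an assumption of this sketch:

struct amdgpu_bo_va_mapping *mapping;

/* gpu_addr is a byte address inside the VM address space */
mapping = amdgpu_vm_bo_lookup_mapping(vm, gpu_addr / AMDGPU_GPU_PAGE_SIZE);
if (!mapping)
	return -ENOENT;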
2613 * @vm: the requested vm
2618 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
2625 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
2641 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2646 * Remove @bo_va->bo from the requested vm.
2655 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_rmv() local
2659 if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_rmv()
2660 vm->bulk_moveable = false; in amdgpu_vm_bo_rmv()
2672 spin_lock(&vm->invalidated_lock); in amdgpu_vm_bo_rmv()
2674 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_bo_rmv()
2678 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_rmv()
2681 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_rmv()
2685 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_rmv()
2686 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_rmv()
2699 * amdgpu_vm_evictable - check if we can evict a VM
2701 * @bo: A page table of the VM.
2703 * Check if it is possible to evict a VM.
2709 /* Page tables of a destroyed VM can go away immediately */ in amdgpu_vm_evictable()
2710 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
2713 /* Don't evict VM page tables while they are busy */ in amdgpu_vm_evictable()
2718 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
2721 /* Don't evict VM page tables while they are updated */ in amdgpu_vm_evictable()
2722 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
2723 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2727 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2728 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2751 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
2753 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { in amdgpu_vm_bo_invalidate()
2764 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) in amdgpu_vm_bo_invalidate()
2772 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2774 * @vm_size: VM size
2777 * VM page table as power of two
2793 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2796 * @min_vm_size: the minimum vm size in GB if it's set auto
2810 /* adjust vm size first */ in amdgpu_vm_adjust_size()
2814 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", in amdgpu_vm_adjust_size()
2822 /* Optimal VM size depends on the amount of physical in amdgpu_vm_adjust_size()
2829 * - On GFX8 and older, VM space can be segmented for in amdgpu_vm_adjust_size()
2835 * VM size with the given page table size. in amdgpu_vm_adjust_size()
2864 /* block size depends on vm size and hw setup */ in amdgpu_vm_adjust_size()
2880 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", in amdgpu_vm_adjust_size()
2887 * amdgpu_vm_wait_idle - wait for the VM to become idle
2889 * @vm: VM object to wait for
2890 * @timeout: timeout to wait for VM to become idle
2892 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2894 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, in amdgpu_vm_wait_idle()
2899 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2903 * amdgpu_vm_init - initialize a vm instance
2906 * @vm: requested vm
2908 * Init @vm fields.
2913 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_init() argument
2919 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2921 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2922 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2923 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2924 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2925 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2926 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2927 spin_lock_init(&vm->invalidated_lock); in amdgpu_vm_init()
2928 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2929 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2932 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init()
2938 r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init()
2944 vm->pte_support_ats = false; in amdgpu_vm_init()
2945 vm->is_compute_context = false; in amdgpu_vm_init()
2947 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2950 DRM_DEBUG_DRIVER("VM update mode is %s\n", in amdgpu_vm_init()
2951 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2952 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2954 "CPU update of VM recommended only for large BAR system\n"); in amdgpu_vm_init()
2956 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2957 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2959 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2960 vm->last_update = NULL; in amdgpu_vm_init()
2961 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2963 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2964 vm->evicting = false; in amdgpu_vm_init()
2966 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2979 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2981 r = amdgpu_vm_clear_bo(adev, vm, root, false); in amdgpu_vm_init()
2985 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2987 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2992 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2997 vm->root.bo = NULL; in amdgpu_vm_init()
3000 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
3001 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_init()
3004 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init()
3010 * amdgpu_vm_check_clean_reserved - check if a VM is clean
3013 * @vm: the VM to check
3017 * VM
3020 * 0 if this VM is clean
3023 struct amdgpu_vm *vm) in amdgpu_vm_check_clean_reserved() argument
3030 if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo) in amdgpu_vm_check_clean_reserved()
3038 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
3041 * @vm: requested vm
3046 * Changes the following VM parameters:
3056 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
3061 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
3066 r = amdgpu_vm_check_clean_reserved(adev, vm); in amdgpu_vm_make_compute()
3073 if (pte_support_ats != vm->pte_support_ats) { in amdgpu_vm_make_compute()
3074 vm->pte_support_ats = pte_support_ats; in amdgpu_vm_make_compute()
3075 r = amdgpu_vm_clear_bo(adev, vm, in amdgpu_vm_make_compute()
3076 to_amdgpu_bo_vm(vm->root.bo), in amdgpu_vm_make_compute()
3082 /* Update VM state */ in amdgpu_vm_make_compute()
3083 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
3085 DRM_DEBUG_DRIVER("VM update mode is %s\n", in amdgpu_vm_make_compute()
3086 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
3087 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
3089 "CPU update of VM recommended only for large BAR system\n"); in amdgpu_vm_make_compute()
3091 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
3093 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
3098 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
3100 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
3102 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
3103 vm->last_update = NULL; in amdgpu_vm_make_compute()
3104 vm->is_compute_context = true; in amdgpu_vm_make_compute()
3106 /* Free the shadow bo for compute VM */ in amdgpu_vm_make_compute()
3107 amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); in amdgpu_vm_make_compute()
3112 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
3117 * amdgpu_vm_release_compute - release a compute vm
3119 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
3122 * pasid from vm. Compute should stop using the vm after this call.
3124 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_release_compute() argument
3126 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_release_compute()
3127 vm->is_compute_context = false; in amdgpu_vm_release_compute()
3131 * amdgpu_vm_fini - tear down a vm instance
3134 * @vm: requested vm
3136 * Tear down @vm.
3137 * Unbind the VM and remove all bos from the vm bo list
3139 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
3146 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
3148 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
3150 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_fini()
3151 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
3152 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
3154 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
3156 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
3161 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
3164 amdgpu_vm_free_pts(adev, vm, NULL); in amdgpu_vm_fini()
3167 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
3169 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini()
3170 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini()
3172 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
3173 dev_err(adev->dev, "still active bo inside vm\n"); in amdgpu_vm_fini()
3176 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
3184 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
3186 amdgpu_vmid_free_reserved(adev, vm, i); in amdgpu_vm_fini()
3190 * amdgpu_vm_manager_init - init the VM manager
3194 * Initialize the VM manager structures
3217 * Compute VM tables will be updated by CPU in amdgpu_vm_manager_init()
3236 * amdgpu_vm_manager_fini - cleanup VM manager
3240 * Cleanup the VM manager and free resources.
3251 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3271 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, in amdgpu_vm_ioctl()
3280 /* Wait for the vm to become idle to make sure the vmid set in SPM_VMID is in amdgpu_vm_ioctl()
3283 r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); in amdgpu_vm_ioctl()
3287 r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); in amdgpu_vm_ioctl()
3291 amdgpu_bo_unreserve(fpriv->vm.root.bo); in amdgpu_vm_ioctl()
3292 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); in amdgpu_vm_ioctl()
3305 * @pasid: PASID identifier for VM
3311 struct amdgpu_vm *vm; in amdgpu_vm_get_task_info() local
3316 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_task_info()
3317 if (vm) in amdgpu_vm_get_task_info()
3318 *task_info = vm->task_info; in amdgpu_vm_get_task_info()
3326 * @vm: vm for which to set the info
3328 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
3330 if (vm->task_info.pid) in amdgpu_vm_set_task_info()
3333 vm->task_info.pid = current->pid; in amdgpu_vm_set_task_info()
3334 get_task_comm(vm->task_info.task_name, current); in amdgpu_vm_set_task_info()
3339 vm->task_info.tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
3340 get_task_comm(vm->task_info.process_name, current->group_leader); in amdgpu_vm_set_task_info()
3344 * amdgpu_vm_handle_fault - graceful handling of VM faults.
3346 * @pasid: PASID of the VM
3350 * Try to gracefully handle a VM fault. Return true if the fault was handled and
3360 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
3364 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
3365 if (vm) { in amdgpu_vm_handle_fault()
3366 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
3367 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
3388 /* Double check that the VM still exists */ in amdgpu_vm_handle_fault()
3390 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
3391 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
3392 vm = NULL; in amdgpu_vm_handle_fault()
3394 if (!vm) in amdgpu_vm_handle_fault()
3424 r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr, in amdgpu_vm_handle_fault()
3430 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
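
Worth calling out in this function is the double-check around the sleeping section: the root BO is referenced while the pasid table is locked, the lock is dropped for work that can sleep, and the VM is then looked up a second time with its root compared to detect a recycled pasid. The shape of that pattern, using the reduced my_vm/pasids names from the earlier xarray sketch (my_bo, bo_ref() and the root_bo field are placeholders):

unsigned long flags;
struct my_bo *root = NULL;
struct my_vm *vm;

xa_lock_irqsave(&pasids, flags);
vm = xa_load(&pasids, pasid);
if (vm)
	root = bo_ref(vm->root_bo);	/* pin identity while unlocked */
xa_unlock_irqrestore(&pasids, flags);

/* ... sleeping work: reserve and validate the root BO ... */

xa_lock_irqsave(&pasids, flags);
vm = xa_load(&pasids, pasid);
if (vm && vm->root_bo != root)
	vm = NULL;			/* pasid was recycled meanwhile */
xa_unlock_irqrestore(&pasids, flags);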
3445 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3447 * @vm: Requested VM for printing BO info
3450 * Print BO information in debugfs file for the VM
3452 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
3470 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3479 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3488 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3497 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3506 spin_lock(&vm->invalidated_lock); in amdgpu_debugfs_vm_bo_info()
3507 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3516 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3521 spin_unlock(&vm->invalidated_lock); in amdgpu_debugfs_vm_bo_info()