Lines Matching +full:compute +full:- +full:cb
#include <linux/dma-fence-array.h>
#include <linux/dma-buf.h>

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 * @cb: callback
    struct dma_fence_cb cb;

 * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence
 * @cb: callback
    struct dma_fence_cb cb;

 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
    if (vm->pasid == pasid)
    if (vm->pasid) {
        r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
        vm->pasid = 0;
    r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
    vm->pasid = pasid;
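The fragment above is the whole trick: a pasid is bound to at most one VM, and rebinding first erases the old entry. Below is a rough userspace model of the same replace-or-erase flow, with a flat table standing in for the kernel's irq-safe xarray; all names and the -EBUSY policy are hypothetical, not the driver's.

/* Minimal userspace model of amdgpu_vm_set_pasid()'s replace-or-erase flow.
 * A flat table stands in for the kernel xarray; names are hypothetical. */
#include <errno.h>
#include <stdio.h>

#define MAX_PASID 64

struct vm { unsigned pasid; };
static struct vm *pasid_table[MAX_PASID];   /* pasid -> vm lookup */

static int vm_set_pasid(struct vm *vm, unsigned pasid)
{
    if (pasid >= MAX_PASID)
        return -EINVAL;
    if (vm->pasid == pasid)
        return 0;                        /* same binding, nothing to do */
    if (vm->pasid)
        pasid_table[vm->pasid] = NULL;   /* drop the old binding */
    if (pasid) {
        if (pasid_table[pasid])
            return -EBUSY;               /* pasid already taken (model policy) */
        pasid_table[pasid] = vm;         /* install the new binding */
    }
    vm->pasid = pasid;
    return 0;
}

int main(void)
{
    struct vm vm = { 0 };
    printf("%d\n", vm_set_pasid(&vm, 5));  /* 0: bound */
    printf("%d\n", vm_set_pasid(&vm, 0));  /* 0: unbound again */
    return 0;
}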
 * amdgpu_vm_bo_evicted - vm_bo is evicted
    struct amdgpu_vm *vm = vm_bo->vm;
    struct amdgpu_bo *bo = vm_bo->bo;

    vm_bo->moved = true;
    spin_lock(&vm_bo->vm->status_lock);
    if (bo->tbo.type == ttm_bo_type_kernel)
        list_move(&vm_bo->vm_status, &vm->evicted);
        list_move_tail(&vm_bo->vm_status, &vm->evicted);
    spin_unlock(&vm_bo->vm->status_lock);

 * amdgpu_vm_bo_moved - vm_bo is moved
    spin_lock(&vm_bo->vm->status_lock);
    list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
    spin_unlock(&vm_bo->vm->status_lock);

 * amdgpu_vm_bo_idle - vm_bo is idle
    spin_lock(&vm_bo->vm->status_lock);
    list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
    spin_unlock(&vm_bo->vm->status_lock);
    vm_bo->moved = false;

 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
    spin_lock(&vm_bo->vm->status_lock);
    list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
    spin_unlock(&vm_bo->vm->status_lock);

 * amdgpu_vm_bo_relocated - vm_bo is relocated
    if (vm_bo->bo->parent) {
        spin_lock(&vm_bo->vm->status_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
        spin_unlock(&vm_bo->vm->status_lock);

 * amdgpu_vm_bo_done - vm_bo is done
    spin_lock(&vm_bo->vm->status_lock);
    list_move(&vm_bo->vm_status, &vm_bo->vm->done);
    spin_unlock(&vm_bo->vm->status_lock);
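All six status helpers share one pattern: take the VM's status lock and re-queue the vm_bo on a different list. Here is a standalone sketch of that pattern, with a minimal hand-rolled circular list and a pthread mutex standing in for the kernel primitives; the types and names are hypothetical, not kernel code.

/* Userspace sketch of the per-VM status-list pattern. */
#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}
static void list_add_tail(struct list_head *e, struct list_head *h)
{
    e->prev = h->prev; e->next = h;
    h->prev->next = e; h->prev = e;
}
/* list_move(): unlink from wherever the entry is, append to the new list */
static void list_move(struct list_head *e, struct list_head *h)
{
    list_del(e);
    list_add_tail(e, h);
}

struct vm {
    pthread_mutex_t status_lock;
    struct list_head idle, moved;   /* two of the six status lists */
};
struct vm_bo { struct list_head vm_status; struct vm *vm; };

static void vm_bo_moved(struct vm_bo *b)
{
    pthread_mutex_lock(&b->vm->status_lock);
    list_move(&b->vm_status, &b->vm->moved);
    pthread_mutex_unlock(&b->vm->status_lock);
}

int main(void)
{
    struct vm vm;
    struct vm_bo bo = { .vm = &vm };

    pthread_mutex_init(&vm.status_lock, NULL);
    list_init(&vm.idle);
    list_init(&vm.moved);
    list_add_tail(&bo.vm_status, &vm.idle);
    vm_bo_moved(&bo);   /* bo now sits on vm.moved, not vm.idle */
    printf("idle empty: %d, on moved: %d\n",
           vm.idle.next == &vm.idle, vm.moved.next == &bo.vm_status);
    return 0;
}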
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
    base->vm = vm;
    base->bo = bo;
    base->next = NULL;
    INIT_LIST_HEAD(&base->vm_status);

    base->next = bo->vm_bo;
    bo->vm_bo = base;

    if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)

    dma_resv_assert_held(vm->root.bo->tbo.base.resv);
    ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
    if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
    if (bo->preferred_domains &
        amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))

 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
    entry->priority = 0;
    entry->tv.bo = &vm->root.bo->tbo;
    entry->tv.num_shared = 4;
    entry->user_pages = NULL;
    list_add(&entry->tv.head, validated);
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
    spin_lock(&adev->mman.bdev.lru_lock);
    ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
    spin_unlock(&adev->mman.bdev.lru_lock);

 * amdgpu_vm_validate_pt_bos - validate the page table BOs
    spin_lock(&vm->status_lock);
    while (!list_empty(&vm->evicted)) {
        bo_base = list_first_entry(&vm->evicted,
        spin_unlock(&vm->status_lock);

        bo = bo_base->bo;
        if (bo->tbo.type != ttm_bo_type_kernel) {
            vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
        spin_lock(&vm->status_lock);
    spin_unlock(&vm->status_lock);

    vm->evicting = false;

 * amdgpu_vm_ready - check VM is ready for updates
    ret = !vm->evicting;
    spin_lock(&vm->status_lock);
    empty = list_empty(&vm->evicted);
    spin_unlock(&vm->status_lock);

 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
    /* Compute has a VM bug for GFX version < 7.
     * Compute has a VM bug for GFX 8 MEC firmware version < 673.
     */
    if (ip_block->version->major <= 7)
    else if (ip_block->version->major == 8)
        if (adev->gfx.mec_fw_version < 673)

    for (i = 0; i < adev->num_rings; i++) {
        ring = adev->rings[i];
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
            /* only compute rings */
            ring->has_compute_vm_bug = has_compute_vm_bug;
            ring->has_compute_vm_bug = false;
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
    struct amdgpu_device *adev = ring->adev;
    unsigned vmhub = ring->funcs->vmhub;
    struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
    bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

    if (job->vmid == 0)
    id = &id_mgr->ids[job->vmid];
    gds_switch_needed = ring->funcs->emit_gds_switch && (
        id->gds_base != job->gds_base ||
        id->gds_size != job->gds_size ||
        id->gws_base != job->gws_base ||
        id->gws_size != job->gws_size ||
        id->oa_base != job->oa_base ||
        id->oa_size != job->oa_size);
 * amdgpu_vm_flush - hardware flush the vm
    struct amdgpu_device *adev = ring->adev;
    unsigned vmhub = ring->funcs->vmhub;
    struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
    struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
    bool gds_switch_needed = ring->funcs->emit_gds_switch && (
        id->gds_base != job->gds_base ||
        id->gds_size != job->gds_size ||
        id->gws_base != job->gws_base ||
        id->gws_size != job->gws_size ||
        id->oa_base != job->oa_base ||
        id->oa_size != job->oa_size);
    bool vm_flush_needed = job->vm_needs_flush;
    bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));

    if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
        adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);

    mutex_lock(&id_mgr->lock);
    if (id->pasid != job->pasid || !id->pasid_mapping ||
        !dma_fence_is_signaled(id->pasid_mapping))
    mutex_unlock(&id_mgr->lock);

    gds_switch_needed &= !!ring->funcs->emit_gds_switch;
    vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
            job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
    pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
        ring->funcs->emit_wreg;

    if (ring->funcs->init_cond_exec)

        trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
        amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
        amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

        mutex_lock(&id_mgr->lock);
        dma_fence_put(id->last_flush);
        id->last_flush = dma_fence_get(fence);
        id->current_gpu_reset_count =
            atomic_read(&adev->gpu_reset_counter);
        mutex_unlock(&id_mgr->lock);

        mutex_lock(&id_mgr->lock);
        id->pasid = job->pasid;
        dma_fence_put(id->pasid_mapping);
        id->pasid_mapping = dma_fence_get(fence);
        mutex_unlock(&id_mgr->lock);

    if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
        id->gds_base = job->gds_base;
        id->gds_size = job->gds_size;
        id->gws_base = job->gws_base;
        id->gws_size = job->gws_size;
        id->oa_base = job->oa_base;
        id->oa_size = job->oa_size;
        amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
                                    job->gds_size, job->gws_base,
                                    job->gws_size, job->oa_base,
                                    job->oa_size);

    if (ring->funcs->patch_cond_exec)
    if (ring->funcs->emit_switch_buffer) {
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
    for (base = bo->vm_bo; base; base = base->next) {
        if (base->vm != vm)

 * amdgpu_vm_map_gart - Resolve gart mapping of addr

 * amdgpu_vm_update_pdes - make sure that all directories are valid
    spin_lock(&vm->status_lock);
    list_splice_init(&vm->relocated, &relocated);
    spin_unlock(&vm->status_lock);

        return -ENODEV;

    r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        flush_tlb_needed |= entry->moved;

    r = vm->update_funcs->commit(&params, &vm->last_update);
        atomic64_inc(&vm->tlb_seq);
 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 * @cb: the callback structure
                   struct dma_fence_cb *cb)
    tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
    atomic64_inc(&tlb_cb->vm->tlb_seq);
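The callback receives only the embedded dma_fence_cb, so container_of() is what recovers the surrounding structure that carries the VM pointer. A self-contained model of that recovery, with simplified stand-in types rather than the kernel's:

/* Standalone sketch of the container_of trick the TLB-sequence callback
 * relies on. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_fence_cb { void (*func)(struct dma_fence_cb *cb); };

struct vm { long long tlb_seq; };

struct tlb_seq_cb {
    struct vm *vm;
    struct dma_fence_cb cb;   /* embedded; this is all the core sees */
};

static void tlb_seq_func(struct dma_fence_cb *cb)
{
    struct tlb_seq_cb *tlb_cb = container_of(cb, struct tlb_seq_cb, cb);

    tlb_cb->vm->tlb_seq++;    /* the kernel uses atomic64_inc() here */
}

int main(void)
{
    struct vm vm = { 0 };
    struct tlb_seq_cb tlb_cb = { .vm = &vm, .cb = { tlb_seq_func } };

    tlb_cb.cb.func(&tlb_cb.cb);              /* simulate fence signaling */
    printf("tlb_seq = %lld\n", vm.tlb_seq);  /* prints 1 */
    return 0;
}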
 * amdgpu_vm_update_range - update a range in the vm page table
        return -ENODEV;

        r = -ENOMEM;

     * heavy-weight TLB flush unconditionally.
    flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
                 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);

    flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);

    if (vm->evicting) {
        r = -EBUSY;

    if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
        amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
        swap(vm->last_unlocked, tmp);

    r = vm->update_funcs->prepare(&params, resv, sync_mode);

                 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
                pages_addr[idx - 1] + PAGE_SIZE))

    r = vm->update_funcs->commit(&params, fence);

        tlb_cb->vm = vm;
        !dma_fence_add_callback(*fence, &tlb_cb->cb,
        dma_fence_put(vm->last_tlb_flush);
        vm->last_tlb_flush = dma_fence_get(*fence);
        amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
amdgpu_vm_get_memory():
    spin_lock(&vm->status_lock);
    list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
        if (!bo_va->base.bo)
        amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
    list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
        if (!bo_va->base.bo)
        amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
    list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
        if (!bo_va->base.bo)
        amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
    list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
        if (!bo_va->base.bo)
        amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
    list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
        if (!bo_va->base.bo)
        amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
    list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
        if (!bo_va->base.bo)
        amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
    spin_unlock(&vm->status_lock);
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 * 0 for success, -EINVAL for failure.
    struct amdgpu_bo *bo = bo_va->base.bo;
    struct amdgpu_vm *vm = bo_va->base.vm;

        resv = vm->root.bo->tbo.base.resv;
        struct drm_gem_object *obj = &bo->tbo.base;

        resv = bo->tbo.base.resv;
        if (obj->import_attach && bo_va->is_xgmi) {
            struct dma_buf *dma_buf = obj->import_attach->dmabuf;
            struct drm_gem_object *gobj = dma_buf->priv;

            if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
        mem = bo->tbo.resource;
        if (mem->mem_type == TTM_PL_TT ||
            mem->mem_type == AMDGPU_PL_PREEMPT)
            pages_addr = bo->tbo.ttm->dma_address;

        flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
        bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
        vram_base = bo_adev->vm_manager.vram_base_offset;

    if (clear || (bo && bo->tbo.base.resv ==
                  vm->root.bo->tbo.base.resv))
        last_update = &vm->last_update;
        last_update = &bo_va->last_pt_update;

    if (!clear && bo_va->base.moved) {
        list_splice_init(&bo_va->valids, &bo_va->invalids);
    } else if (bo_va->cleared != clear) {
        list_splice_init(&bo_va->valids, &bo_va->invalids);

    list_for_each_entry(mapping, &bo_va->invalids, list) {
        /* Normally, bo_va->flags only contains the READABLE and
         * WRITEABLE bits here.
         */
        if (!(mapping->flags & AMDGPU_PTE_READABLE))
        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
            resv, mapping->start, mapping->last,
            update_flags, mapping->offset,

    if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
        uint32_t mem_type = bo->tbo.resource->mem_type;

        if (!(bo->preferred_domains &
            amdgpu_vm_bo_evicted(&bo_va->base);
            amdgpu_vm_bo_idle(&bo_va->base);
            amdgpu_vm_bo_done(&bo_va->base);

    list_splice_init(&bo_va->invalids, &bo_va->valids);
    bo_va->cleared = clear;
    bo_va->base.moved = false;

    list_for_each_entry(mapping, &bo_va->valids, list)
 * amdgpu_vm_update_prt_state - update the global PRT state
    spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
    enable = !!atomic_read(&adev->vm_manager.num_prt_users);
    adev->gmc.gmc_funcs->set_prt(adev, enable);
    spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);

 * amdgpu_vm_prt_get - add a PRT user
    if (!adev->gmc.gmc_funcs->set_prt)
    if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)

 * amdgpu_vm_prt_put - drop a PRT user
    if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
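PRT enablement is edge-triggered: the hardware state only changes on the 0 -> 1 and 1 -> 0 transitions of the user count. A userspace model with C11 atomics; the kernel uses atomic_inc_return()/atomic_dec_return(), and the names below are hypothetical:

/* Sketch of the PRT user-count idiom: toggle a global feature only on
 * the first get and the last put. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_prt_users;

static void set_prt(int enable)
{
    printf("PRT %s\n", enable ? "enabled" : "disabled");
}

static void prt_get(void)
{
    /* fetch_add returns the old value, so old == 0 means 0 -> 1 */
    if (atomic_fetch_add(&num_prt_users, 1) == 0)
        set_prt(1);
}

static void prt_put(void)
{
    /* old == 1 means this was the last user, 1 -> 0 */
    if (atomic_fetch_sub(&num_prt_users, 1) == 1)
        set_prt(0);
}

int main(void)
{
    prt_get();  /* prints "PRT enabled" */
    prt_get();  /* no print, already on */
    prt_put();  /* no print, still one user */
    prt_put();  /* prints "PRT disabled" */
    return 0;
}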
 * amdgpu_vm_prt_cb - callback for updating the PRT status
    struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

    amdgpu_vm_prt_put(cb->adev);
    kfree(cb);

 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
    struct amdgpu_prt_cb *cb;

    if (!adev->gmc.gmc_funcs->set_prt)

    cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
    if (!cb) {
    cb->adev = adev;
    if (!fence || dma_fence_add_callback(fence, &cb->cb,
        amdgpu_vm_prt_cb(fence, &cb->cb);
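The last two lines above show the attach-or-run-now idiom: if the fence is missing or already signaled, the callback is invoked directly instead of being queued. A simplified standalone model follows; the fence type and add_callback() are stand-ins, not the dma_fence API, and the allocation-failure fallback is only noted in a comment:

/* Sketch of attach-or-run-now: hang a callback on the fence, or run it
 * immediately when the fence is absent or already signaled. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fence { bool signaled; };
struct cb { void (*func)(struct cb *cb); };

/* Returns false (does not attach) when the fence already signaled,
 * mirroring dma_fence_add_callback()'s -ENOENT case. */
static bool add_callback(struct fence *f, struct cb *cb)
{
    return f && !f->signaled;   /* attach only if still pending */
}

static void prt_cb(struct cb *cb)
{
    printf("drop PRT user\n");
    free(cb);
}

static void add_prt_cb(struct fence *fence)
{
    struct cb *cb = malloc(sizeof(*cb));

    if (!cb)
        return;             /* the kernel falls back to a synchronous wait */
    cb->func = prt_cb;
    if (!fence || !add_callback(fence, cb))
        prt_cb(cb);         /* already signaled: run it right away */
}

int main(void)
{
    struct fence done = { .signaled = true };
    add_prt_cb(&done);      /* callback runs immediately */
    add_prt_cb(NULL);       /* no fence: also runs right away */
    return 0;
}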
 * amdgpu_vm_free_mapping - free a mapping
    if (mapping->flags & AMDGPU_PTE_PRT)

 * amdgpu_vm_prt_fini - finish all prt mappings
    struct dma_resv *resv = vm->root.bo->tbo.base.resv;

 * amdgpu_vm_clear_freed - clear freed BOs in the PT
    struct dma_resv *resv = vm->root.bo->tbo.base.resv;

    while (!list_empty(&vm->freed)) {
        mapping = list_first_entry(&vm->freed,
        list_del(&mapping->list);

        if (vm->pte_support_ats &&
            mapping->start < AMDGPU_GMC_HOLE_START)
                    mapping->start, mapping->last,

 * amdgpu_vm_handle_moved - handle moved BOs in the PT
    spin_lock(&vm->status_lock);
    while (!list_empty(&vm->moved)) {
        bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
        spin_unlock(&vm->status_lock);

        spin_lock(&vm->status_lock);

    while (!list_empty(&vm->invalidated)) {
        bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
        resv = bo_va->base.bo->tbo.base.resv;
        spin_unlock(&vm->status_lock);

        spin_lock(&vm->status_lock);
    spin_unlock(&vm->status_lock);
 * amdgpu_vm_bo_add - add a bo to a specific vm
    amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
    bo_va->ref_count = 1;
    INIT_LIST_HEAD(&bo_va->valids);
    INIT_LIST_HEAD(&bo_va->invalids);

        dma_resv_assert_held(bo->tbo.base.resv);
        bo_va->is_xgmi = true;

 * amdgpu_vm_bo_insert_map - insert a new mapping
    struct amdgpu_vm *vm = bo_va->base.vm;
    struct amdgpu_bo *bo = bo_va->base.bo;

    mapping->bo_va = bo_va;
    list_add(&mapping->list, &bo_va->invalids);
    amdgpu_vm_it_insert(mapping, &vm->va);

    if (mapping->flags & AMDGPU_PTE_PRT)

    if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
        !bo_va->base.moved) {
        amdgpu_vm_bo_moved(&bo_va->base);
 * amdgpu_vm_bo_map - map bo inside a vm
    struct amdgpu_bo *bo = bo_va->base.bo;
    struct amdgpu_vm *vm = bo_va->base.vm;

        return -EINVAL;

    eaddr = saddr + size - 1;
        (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
        return -EINVAL;

    tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
        dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
            "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
            tmp->start, tmp->last + 1);
        return -EINVAL;

        return -ENOMEM;

    mapping->start = saddr;
    mapping->last = eaddr;
    mapping->offset = offset;
    mapping->flags = flags;
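A worked model of the range validation above: the end address is inclusive (saddr + size - 1), overflow and out-of-bounds ends are rejected, and a lookup in the va tree rejects overlapping mappings. The linear scan below merely stands in for the driver's interval tree, and the constants are hypothetical:

/* Standalone model of amdgpu_vm_bo_map()'s range checks. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define MAX_PFN    (1ULL << 36)   /* hypothetical VM space, in pages */

struct mapping { uint64_t start, last; };

static const struct mapping existing[] = { { 0x100000, 0x1fffff } };

static int vm_bo_map(uint64_t saddr, uint64_t size)
{
    uint64_t eaddr = saddr + size - 1;   /* inclusive end address */
    size_t i;

    /* catches size == 0 and wrap-around, plus out-of-range ends */
    if (saddr >= eaddr || eaddr >= (MAX_PFN << PAGE_SHIFT))
        return -EINVAL;

    for (i = 0; i < sizeof(existing) / sizeof(existing[0]); i++)
        if (saddr <= existing[i].last && eaddr >= existing[i].start)
            return -EINVAL;   /* overlaps an existing mapping */

    return 0;
}

int main(void)
{
    printf("%d\n", vm_bo_map(0x200000, 0x1000)); /* 0: fits */
    printf("%d\n", vm_bo_map(0x180000, 0x1000)); /* -22: conflicts */
    printf("%d\n", vm_bo_map(0x1000, 0));        /* -22: bad size */
    return 0;
}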
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
    struct amdgpu_bo *bo = bo_va->base.bo;

        return -EINVAL;

    eaddr = saddr + size - 1;
        (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
        return -EINVAL;

        return -ENOMEM;

    r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);

    mapping->start = saddr;
    mapping->last = eaddr;
    mapping->offset = offset;
    mapping->flags = flags;

 * amdgpu_vm_bo_unmap - remove bo mapping from vm
    struct amdgpu_vm *vm = bo_va->base.vm;

    list_for_each_entry(mapping, &bo_va->valids, list) {
        if (mapping->start == saddr)

    if (&mapping->list == &bo_va->valids) {
        list_for_each_entry(mapping, &bo_va->invalids, list) {
            if (mapping->start == saddr)

        if (&mapping->list == &bo_va->invalids)
            return -ENOENT;

    list_del(&mapping->list);
    amdgpu_vm_it_remove(mapping, &vm->va);
    mapping->bo_va = NULL;
    list_add(&mapping->list, &vm->freed);
        bo_va->last_pt_update);
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
    eaddr = saddr + size - 1;

        return -ENOMEM;
    INIT_LIST_HEAD(&before->list);

        return -ENOMEM;
    INIT_LIST_HEAD(&after->list);

    tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
        if (tmp->start < saddr) {
            before->start = tmp->start;
            before->last = saddr - 1;
            before->offset = tmp->offset;
            before->flags = tmp->flags;
            before->bo_va = tmp->bo_va;
            list_add(&before->list, &tmp->bo_va->invalids);

        if (tmp->last > eaddr) {
            after->start = eaddr + 1;
            after->last = tmp->last;
            after->offset = tmp->offset;
            after->offset += (after->start - tmp->start) << PAGE_SHIFT;
            after->flags = tmp->flags;
            after->bo_va = tmp->bo_va;
            list_add(&after->list, &tmp->bo_va->invalids);

        list_del(&tmp->list);
        list_add(&tmp->list, &removed);

        amdgpu_vm_it_remove(tmp, &vm->va);
        list_del(&tmp->list);

        if (tmp->start < saddr)
            tmp->start = saddr;
        if (tmp->last > eaddr)
            tmp->last = eaddr;

        tmp->bo_va = NULL;
        list_add(&tmp->list, &vm->freed);

    if (!list_empty(&before->list)) {
        amdgpu_vm_it_insert(before, &vm->va);
        if (before->flags & AMDGPU_PTE_PRT)

    if (!list_empty(&after->list)) {
        amdgpu_vm_it_insert(after, &vm->va);
        if (after->flags & AMDGPU_PTE_PRT)
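The before/after bookkeeping above splits any mapping that straddles the cleared range [saddr, eaddr], advancing the tail's BO offset by the bytes skipped. A standalone model of just that split arithmetic, with simplified types:

/* Model of the head/tail split performed by amdgpu_vm_bo_clear_mappings(). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct mapping { uint64_t start, last, offset; };

static void split(const struct mapping *tmp, uint64_t saddr, uint64_t eaddr,
                  struct mapping *before, struct mapping *after,
                  int *has_before, int *has_after)
{
    *has_before = tmp->start < saddr;
    if (*has_before) {
        before->start  = tmp->start;
        before->last   = saddr - 1;       /* ends just before the hole */
        before->offset = tmp->offset;
    }
    *has_after = tmp->last > eaddr;
    if (*has_after) {
        after->start = eaddr + 1;         /* begins just after the hole */
        after->last  = tmp->last;
        /* skip the pages consumed by the head plus the cleared middle */
        after->offset = tmp->offset +
                        ((after->start - tmp->start) << PAGE_SHIFT);
    }
}

int main(void)
{
    struct mapping tmp = { .start = 0x100, .last = 0x2ff, .offset = 0 };
    struct mapping before, after;
    int hb, ha;

    split(&tmp, 0x180, 0x27f, &before, &after, &hb, &ha);
    if (hb)
        printf("before: 0x%llx-0x%llx\n",
               (unsigned long long)before.start,
               (unsigned long long)before.last);
    if (ha)
        printf("after:  0x%llx-0x%llx offset 0x%llx\n",
               (unsigned long long)after.start,
               (unsigned long long)after.last,
               (unsigned long long)after.offset);
    return 0;
}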
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
    return amdgpu_vm_it_iter_first(&vm->va, addr, addr);

 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
    for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
        if (mapping->bo_va && mapping->bo_va->base.bo) {
            bo = mapping->bo_va->base.bo;
            if (dma_resv_locking_ctx(bo->tbo.base.resv) !=

 * amdgpu_vm_bo_del - remove a bo from a specific vm
 * Remove @bo_va->bo from the requested vm.
    struct amdgpu_bo *bo = bo_va->base.bo;
    struct amdgpu_vm *vm = bo_va->base.vm;

    dma_resv_assert_held(vm->root.bo->tbo.base.resv);
        dma_resv_assert_held(bo->tbo.base.resv);
        if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
            ttm_bo_set_bulk_move(&bo->tbo, NULL);

        for (base = &bo_va->base.bo->vm_bo; *base;
             base = &(*base)->next) {
            if (*base != &bo_va->base)
            *base = bo_va->base.next;

    spin_lock(&vm->status_lock);
    list_del(&bo_va->base.vm_status);
    spin_unlock(&vm->status_lock);

    list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
        list_del(&mapping->list);
        amdgpu_vm_it_remove(mapping, &vm->va);
        mapping->bo_va = NULL;
        list_add(&mapping->list, &vm->freed);
    list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
        list_del(&mapping->list);
        amdgpu_vm_it_remove(mapping, &vm->va);
            bo_va->last_pt_update);
    dma_fence_put(bo_va->last_pt_update);

    if (bo && bo_va->is_xgmi)
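The vm_bo walk above uses the classic pointer-to-pointer unlink, which pairs with the push-front in amdgpu_vm_bo_base_init() (base->next = bo->vm_bo; bo->vm_bo = base;). A standalone model of the idiom, with hypothetical types:

/* Pointer-to-pointer unlink: iterate over the address of each `next`
 * field so removing any element, including the head, is one assignment. */
#include <stdio.h>

struct vm_bo_base { int id; struct vm_bo_base *next; };

static void unlink_base(struct vm_bo_base **head, struct vm_bo_base *victim)
{
    struct vm_bo_base **base;

    /* `base` always points at the pointer that currently references
     * *base, so rewriting *base splices the victim out in place. */
    for (base = head; *base; base = &(*base)->next) {
        if (*base != victim)
            continue;
        *base = victim->next;
        break;
    }
}

int main(void)
{
    struct vm_bo_base c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct vm_bo_base *head = &a, *it;

    unlink_base(&head, &a);      /* removing the head needs no special case */
    for (it = head; it; it = it->next)
        printf("%d ", it->id);   /* prints: 2 3 */
    printf("\n");
    return 0;
}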
 * amdgpu_vm_evictable - check if we can evict a VM
    struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;

    if (!bo_base || !bo_base->vm)

    if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))

    if (!amdgpu_vm_eviction_trylock(bo_base->vm))

    if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
        amdgpu_vm_eviction_unlock(bo_base->vm);

    bo_base->vm->evicting = true;
    amdgpu_vm_eviction_unlock(bo_base->vm);

 * amdgpu_vm_bo_invalidate - mark the bo as invalid
    if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
        bo = bo->parent;

    for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
        struct amdgpu_vm *vm = bo_base->vm;

        if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {

        if (bo_base->moved)
        bo_base->moved = true;

        if (bo->tbo.type == ttm_bo_type_kernel)
        else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)

 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
    return (bits - 9);
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
    unsigned int max_size = 1 << (max_bits - 30);

    if (amdgpu_vm_size != -1) {
        dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",

     * - Need to map system memory and VRAM from all GPUs
     * - VRAM from other GPUs not known here
     * - Assume VRAM <= system memory
     * - On GFX8 and older, VM space can be segmented for
     * - Need to allow room for fragmentation, guard pages etc.
            (1 << 30) - 1) >> 30;

    adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

    tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
    if (amdgpu_vm_block_size != -1)
        tmp >>= amdgpu_vm_block_size - 9;
    tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
    adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
    switch (adev->vm_manager.num_level) {
        adev->vm_manager.root_level = AMDGPU_VM_PDB2;
        adev->vm_manager.root_level = AMDGPU_VM_PDB1;
        adev->vm_manager.root_level = AMDGPU_VM_PDB0;
        dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");

    if (amdgpu_vm_block_size != -1)
        adev->vm_manager.block_size =
                - AMDGPU_GPU_PAGE_SHIFT
                - 9 * adev->vm_manager.num_level);
    else if (adev->vm_manager.num_level > 1)
        adev->vm_manager.block_size = 9;
        adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

    if (amdgpu_vm_fragment_size == -1)
        adev->vm_manager.fragment_size = fragment_size_default;
        adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

    DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
         vm_size, adev->vm_manager.num_level + 1,
         adev->vm_manager.block_size,
         adev->vm_manager.fragment_size);
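To make the sizing math concrete: vm_size is in GB, each GB is 2^18 GPU pages of 4 KiB, and every page-table level resolves 9 bits of page-frame number. A small model of the level computation, assuming max_level = 3 and a power-of-two vm_size (the kernel additionally rounds max_pfn up to a power of two and honors module overrides):

/* Worked model of the num_level/block_size derivation above. */
#include <stdint.h>
#include <stdio.h>

static unsigned fls64_(uint64_t x)   /* 1-based index of the highest set bit */
{
    unsigned n = 0;
    while (x) { n++; x >>= 1; }
    return n;
}

int main(void)
{
    unsigned vm_size = 256;                       /* GB, hypothetical */
    unsigned max_level = 3;
    uint64_t max_pfn = (uint64_t)vm_size << 18;   /* pages to cover */
    uint64_t tmp = max_pfn;                       /* already a power of two */
    unsigned num_level, block_size;

    /* levels needed so that 9 bits per level cover all PFN bits:
     * DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1, as in the kernel */
    num_level = (fls64_(tmp) - 1 + 8) / 9 - 1;
    if (num_level > max_level)
        num_level = max_level;
    block_size = num_level > 1 ? 9 : 0;           /* default 9-bit blocks */

    printf("max_pfn=%llu num_level=%u block_size=%u\n",
           (unsigned long long)max_pfn, num_level, block_size);
    /* 256 GB -> max_pfn = 2^26, 26 PFN bits -> 2 levels below the root */
    return 0;
}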
 * amdgpu_vm_wait_idle - wait for the VM to become idle
    timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,

    return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);

 * amdgpu_vm_init - initialize a vm instance
    vm->va = RB_ROOT_CACHED;
        vm->reserved_vmid[i] = NULL;
    INIT_LIST_HEAD(&vm->evicted);
    INIT_LIST_HEAD(&vm->relocated);
    INIT_LIST_HEAD(&vm->moved);
    INIT_LIST_HEAD(&vm->idle);
    INIT_LIST_HEAD(&vm->invalidated);
    spin_lock_init(&vm->status_lock);
    INIT_LIST_HEAD(&vm->freed);
    INIT_LIST_HEAD(&vm->done);
    INIT_LIST_HEAD(&vm->pt_freed);
    INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);

    r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
                              adev->vm_manager.vm_pte_scheds,
                              adev->vm_manager.vm_pte_num_scheds, NULL);

    r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
                              adev->vm_manager.vm_pte_scheds,
                              adev->vm_manager.vm_pte_num_scheds, NULL);

    vm->pte_support_ats = false;
    vm->is_compute_context = false;

    vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
            vm->use_cpu_for_update ? "CPU" : "SDMA");
    WARN_ONCE((vm->use_cpu_for_update &&
               !amdgpu_gmc_vram_full_visible(&adev->gmc)),
    if (vm->use_cpu_for_update)
        vm->update_funcs = &amdgpu_vm_cpu_funcs;
        vm->update_funcs = &amdgpu_vm_sdma_funcs;
    vm->last_update = NULL;
    vm->last_unlocked = dma_fence_get_stub();
    vm->last_tlb_flush = dma_fence_get_stub();

    mutex_init(&vm->eviction_lock);
    vm->evicting = false;

    r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
    root_bo = &root->bo;

    r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);

    amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);

    amdgpu_bo_unreserve(vm->root.bo);

    INIT_KFIFO(vm->faults);

    amdgpu_bo_unreserve(vm->root.bo);
    amdgpu_bo_unref(&root->shadow);
    vm->root.bo = NULL;

    dma_fence_put(vm->last_tlb_flush);
    dma_fence_put(vm->last_unlocked);
    drm_sched_entity_destroy(&vm->delayed);
    drm_sched_entity_destroy(&vm->immediate);
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 * - use_cpu_for_update
 * - pte_supports_ats
 * 0 for success, -errno for errors.
    bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);

    r = amdgpu_bo_reserve(vm->root.bo, true);
        r = -EINVAL;

    if (pte_support_ats != vm->pte_support_ats) {
        vm->pte_support_ats = pte_support_ats;
        r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),

    vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
            vm->use_cpu_for_update ? "CPU" : "SDMA");
    WARN_ONCE((vm->use_cpu_for_update &&
               !amdgpu_gmc_vram_full_visible(&adev->gmc)),
    if (vm->use_cpu_for_update) {
        r = amdgpu_bo_sync_wait(vm->root.bo,

        vm->update_funcs = &amdgpu_vm_cpu_funcs;
        vm->update_funcs = &amdgpu_vm_sdma_funcs;

     * when turning a GFX VM into a compute VM.
    r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo));

    dma_fence_put(vm->last_update);
    vm->last_update = NULL;
    vm->is_compute_context = true;

    /* Free the shadow bo for compute VM */
    amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);

    amdgpu_bo_unreserve(vm->root.bo);

 * amdgpu_vm_release_compute - release a compute vm
 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
 * pasid from the vm. Compute should stop using the vm after this call.
    vm->is_compute_context = false;
 * amdgpu_vm_fini - tear down a vm instance
    bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;

    flush_work(&vm->pt_free_work);

    root = amdgpu_bo_ref(vm->root.bo);
    dma_fence_wait(vm->last_unlocked, false);
    dma_fence_put(vm->last_unlocked);
    dma_fence_wait(vm->last_tlb_flush, false);
    spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
    spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
    dma_fence_put(vm->last_tlb_flush);

    list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
        if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
        list_del(&mapping->list);

    WARN_ON(vm->root.bo);

    drm_sched_entity_destroy(&vm->immediate);
    drm_sched_entity_destroy(&vm->delayed);

    if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
        dev_err(adev->dev, "still active bo inside vm\n");
                     &vm->va.rb_root, rb) {
        list_del(&mapping->list);

    dma_fence_put(vm->last_update);

 * amdgpu_vm_manager_init - init the VM manager
    adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
                                          adev->asic_type == CHIP_NAVI10 ||
                                          adev->asic_type == CHIP_NAVI14);

    adev->vm_manager.fence_context =
        adev->vm_manager.seqno[i] = 0;

    spin_lock_init(&adev->vm_manager.prt_lock);
    atomic_set(&adev->vm_manager.num_prt_users, 0);

     * Compute VM tables will be updated by CPU
    if (amdgpu_vm_update_mode == -1) {
        if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            adev->vm_manager.vm_update_mode =
            adev->vm_manager.vm_update_mode = 0;
        adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
        adev->vm_manager.vm_update_mode = 0;

    xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
 * amdgpu_vm_manager_fini - cleanup VM manager
    WARN_ON(!xa_empty(&adev->vm_manager.pasids));
    xa_destroy(&adev->vm_manager.pasids);

 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 * 0 for success, -errno for errors.
    struct amdgpu_fpriv *fpriv = filp->driver_priv;

    switch (args->in.op) {
        r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
        r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
        r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
        amdgpu_bo_unreserve(fpriv->vm.root.bo);
        amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
        return -EINVAL;

 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
    xa_lock_irqsave(&adev->vm_manager.pasids, flags);
    vm = xa_load(&adev->vm_manager.pasids, pasid);
        *task_info = vm->task_info;
    xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);

 * amdgpu_vm_set_task_info - Sets the VM's task info.
    if (vm->task_info.pid)

    vm->task_info.pid = current->pid;
    get_task_comm(vm->task_info.task_name, current);

    if (current->group_leader->mm != current->mm)

    vm->task_info.tgid = current->group_leader->pid;
    get_task_comm(vm->task_info.process_name, current->group_leader);
 * amdgpu_vm_handle_fault - graceful handling of VM faults.
    xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
    vm = xa_load(&adev->vm_manager.pasids, pasid);
        root = amdgpu_bo_ref(vm->root.bo);
        is_compute_context = vm->is_compute_context;
    xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);

    xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
    vm = xa_load(&adev->vm_manager.pasids, pasid);
    if (vm && vm->root.bo != root)
    xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);

     * combination to force a no-retry-fault
    value = adev->dummy_page_addr;

    r = dma_resv_reserve_fences(root->tbo.base.resv, 1);

 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
    spin_lock(&vm->status_lock);
    list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
        if (!bo_va->base.bo)
        total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
    list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
        if (!bo_va->base.bo)
        total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
    list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
        if (!bo_va->base.bo)
        total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
    list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
        if (!bo_va->base.bo)
        total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
    list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
        if (!bo_va->base.bo)
        total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
    list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
        if (!bo_va->base.bo)
        total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
    spin_unlock(&vm->status_lock);