Lines Matching refs:vm

202 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
207 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
209 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
221 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
234 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
247 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
261 spin_lock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_invalidated()
262 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
263 spin_unlock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_invalidated()
276 spin_lock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_done()
278 spin_unlock(&vm_bo->vm->invalidated_lock); in amdgpu_vm_bo_done()
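
The helpers referenced at lines 202-278 above (amdgpu_vm_bo_evicted, _relocated, _moved, _idle, _invalidated, _done) all share one mechanism: each BO carries a single vm_status list node that is list_move()'d between per-VM state lists, and only the invalidated list is guarded by vm->invalidated_lock. Below is a minimal, illustrative sketch of that state machine, not the driver's actual definitions; the demo_* names are invented, and the comments gloss the roles the lists appear to play in the functions referenced further down.

/* Sketch only, assuming <linux/list.h> and <linux/spinlock.h>. */
struct demo_vm {
	struct list_head evicted;       /* BOs that must be validated again */
	struct list_head relocated;     /* page-table BOs needing PDE updates */
	struct list_head moved;         /* per-VM BOs with stale mappings */
	struct list_head idle;          /* everything up to date */
	struct list_head invalidated;   /* shared BOs with stale mappings */
	spinlock_t       invalidated_lock;
	bool             bulk_moveable; /* used by the LRU sketch further below */
};

struct demo_vm_bo {
	struct demo_vm   *vm;
	struct list_head  vm_status;    /* membership in exactly one list above */
};

static void demo_vm_bo_idle(struct demo_vm_bo *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
}

static void demo_vm_bo_invalidated(struct demo_vm_bo *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

Because membership is exclusive, a later consumer such as amdgpu_vm_validate_pt_bos or amdgpu_vm_handle_moved only has to walk one list to find every BO in a given state.
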
292 struct amdgpu_vm *vm, in amdgpu_vm_bo_base_init() argument
295 base->vm = vm; in amdgpu_vm_bo_base_init()
305 if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) in amdgpu_vm_bo_base_init()
308 vm->bulk_moveable = false; in amdgpu_vm_bo_base_init()
365 struct amdgpu_vm *vm, uint64_t start, in amdgpu_vm_pt_start() argument
370 cursor->entry = &vm->root; in amdgpu_vm_pt_start()
490 struct amdgpu_vm *vm, in amdgpu_vm_pt_first_dfs() argument
497 amdgpu_vm_pt_start(adev, vm, 0, cursor); in amdgpu_vm_pt_first_dfs()
541 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ argument
542 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
557 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, in amdgpu_vm_get_pd_bo() argument
562 entry->tv.bo = &vm->root.base.bo->tbo; in amdgpu_vm_get_pd_bo()
584 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_del_from_lru_notify() local
586 if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) in amdgpu_vm_del_from_lru_notify()
587 vm->bulk_moveable = false; in amdgpu_vm_del_from_lru_notify()
601 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
606 if (vm->bulk_moveable) { in amdgpu_vm_move_to_lru_tail()
608 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
613 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); in amdgpu_vm_move_to_lru_tail()
616 list_for_each_entry(bo_base, &vm->idle, vm_status) { in amdgpu_vm_move_to_lru_tail()
622 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
625 &vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
629 vm->bulk_moveable = true; in amdgpu_vm_move_to_lru_tail()
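
amdgpu_vm_move_to_lru_tail (lines 601-629 above) caches the expensive part of the LRU bump: while vm->bulk_moveable holds, a single ttm_bo_bulk_move_lru_tail() call suffices, and the flag is cleared at several points in the listing (lines 308, 587, 652, 931, 2446) when the cached ordering can no longer be trusted. A hedged sketch of that flag-guarded rebuild, reusing struct demo_vm and struct demo_vm_bo from the sketch above; the TTM calls are quoted from the listing as comments rather than compiled, since their surrounding types are not shown here.

static void demo_vm_move_to_lru_tail(struct demo_vm *vm)
{
	struct demo_vm_bo *bo_base;

	if (vm->bulk_moveable) {
		/* Fast path, as at line 608:
		 *   ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
		 */
		return;
	}

	/* Slow path, as at lines 613-625: forget the stale descriptor
	 * (memset of vm->lru_bulk_move) and rebuild it from the idle list.
	 */
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		/* ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); */
	}

	vm->bulk_moveable = true;
}
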
645 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate_pt_bos() argument
652 vm->bulk_moveable &= list_empty(&vm->evicted); in amdgpu_vm_validate_pt_bos()
654 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { in amdgpu_vm_validate_pt_bos()
664 vm->update_funcs->map_table(bo); in amdgpu_vm_validate_pt_bos()
685 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
687 return list_empty(&vm->evicted); in amdgpu_vm_ready()
703 struct amdgpu_vm *vm, in amdgpu_vm_clear_bo() argument
724 if (!vm->pte_support_ats) { in amdgpu_vm_clear_bo()
737 if ((pt - vm->root.entries) >= ats_entries) { in amdgpu_vm_clear_bo()
756 r = vm->update_funcs->map_table(bo); in amdgpu_vm_clear_bo()
762 params.vm = vm; in amdgpu_vm_clear_bo()
764 r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL); in amdgpu_vm_clear_bo()
779 r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries, in amdgpu_vm_clear_bo()
802 r = vm->update_funcs->update(&params, bo, addr, 0, entries, in amdgpu_vm_clear_bo()
808 return vm->update_funcs->commit(&params, NULL); in amdgpu_vm_clear_bo()
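
amdgpu_vm_clear_bo (lines 703-808 above) never touches page tables directly: every step goes through the vm->update_funcs table (map_table, prepare, update, commit), which amdgpu_vm_init and amdgpu_vm_make_compute later point at either amdgpu_vm_cpu_funcs or amdgpu_vm_sdma_funcs (lines 2708-2711 and 2864-2867). A minimal sketch of that backend indirection follows; only the call sites are visible above, so the parameter lists below are invented for illustration and do not match the real amdgpu_vm_update_funcs.

/* Sketch only; needs <stdint.h> or <linux/types.h> for uint64_t. */
struct demo_update_params;      /* stand-in for the driver's update params */

struct demo_vm_update_funcs {
	int (*map_table)(void *bo);
	int (*prepare)(struct demo_update_params *p, void *owner, void *excl);
	int (*update)(struct demo_update_params *p, void *bo,
		      uint64_t addr, unsigned int count);
	int (*commit)(struct demo_update_params *p, void **fence);
};

static int demo_clear_bo(const struct demo_vm_update_funcs *funcs,
			 struct demo_update_params *params,
			 void *bo, uint64_t addr, unsigned int entries)
{
	int r;

	r = funcs->map_table(bo);       /* make the table writable/visible */
	if (r)
		return r;

	r = funcs->prepare(params, NULL, NULL);
	if (r)
		return r;

	r = funcs->update(params, bo, addr, entries);  /* write the entries */
	if (r)
		return r;

	return funcs->commit(params, NULL);            /* submit or flush */
}

The same prepare/update/commit sequence recurs in amdgpu_vm_update_directories and amdgpu_vm_bo_update_mapping further down, which is what makes the CPU and SDMA back ends interchangeable.
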
818 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_bo_param() argument
829 if (vm->use_cpu_for_update) in amdgpu_vm_bo_param()
831 else if (!vm->root.base.bo || vm->root.base.bo->shadow) in amdgpu_vm_bo_param()
834 if (vm->root.base.bo) in amdgpu_vm_bo_param()
835 bp->resv = vm->root.base.bo->tbo.base.resv; in amdgpu_vm_bo_param()
852 struct amdgpu_vm *vm, in amdgpu_vm_alloc_pts() argument
874 amdgpu_vm_bo_param(adev, vm, cursor->level, &bp); in amdgpu_vm_alloc_pts()
884 amdgpu_vm_bo_base_init(&entry->base, vm, pt); in amdgpu_vm_alloc_pts()
886 r = amdgpu_vm_clear_bo(adev, vm, pt); in amdgpu_vm_alloc_pts()
925 struct amdgpu_vm *vm, in amdgpu_vm_free_pts() argument
931 vm->bulk_moveable = false; in amdgpu_vm_free_pts()
933 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) in amdgpu_vm_free_pts()
1134 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
1140 if (base->vm != vm) in amdgpu_vm_bo_find()
1185 struct amdgpu_vm *vm, in amdgpu_vm_update_pde() argument
1199 return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); in amdgpu_vm_update_pde()
1211 struct amdgpu_vm *vm) in amdgpu_vm_invalidate_pds() argument
1216 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) in amdgpu_vm_invalidate_pds()
1233 struct amdgpu_vm *vm) in amdgpu_vm_update_directories() argument
1238 if (list_empty(&vm->relocated)) in amdgpu_vm_update_directories()
1243 params.vm = vm; in amdgpu_vm_update_directories()
1245 r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL); in amdgpu_vm_update_directories()
1249 while (!list_empty(&vm->relocated)) { in amdgpu_vm_update_directories()
1252 entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt, in amdgpu_vm_update_directories()
1256 r = amdgpu_vm_update_pde(&params, vm, entry); in amdgpu_vm_update_directories()
1261 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_directories()
1267 amdgpu_vm_invalidate_pds(adev, vm); in amdgpu_vm_update_directories()
1295 params->vm->update_funcs->update(params, bo, pe, addr, count, incr, in amdgpu_vm_update_flags()
1388 amdgpu_vm_pt_start(adev, params->vm, start, &cursor); in amdgpu_vm_update_ptes()
1394 r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor); in amdgpu_vm_update_ptes()
1468 amdgpu_vm_free_pts(adev, params->vm, &cursor); in amdgpu_vm_update_ptes()
1502 struct amdgpu_vm *vm, in amdgpu_vm_bo_update_mapping() argument
1513 params.vm = vm; in amdgpu_vm_bo_update_mapping()
1520 r = vm->update_funcs->prepare(&params, owner, exclusive); in amdgpu_vm_bo_update_mapping()
1528 return vm->update_funcs->commit(&params, fence); in amdgpu_vm_bo_update_mapping()
1553 struct amdgpu_vm *vm, in amdgpu_vm_bo_split_mapping() argument
1645 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm, in amdgpu_vm_bo_split_mapping()
1680 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1713 if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)) in amdgpu_vm_bo_update()
1714 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1727 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, in amdgpu_vm_bo_update()
1734 if (vm->use_cpu_for_update) { in amdgpu_vm_bo_update()
1744 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { in amdgpu_vm_bo_update()
1861 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
1878 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
1880 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1924 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
1932 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1933 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1937 if (vm->pte_support_ats && in amdgpu_vm_clear_freed()
1941 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, in amdgpu_vm_clear_freed()
1944 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
1976 struct amdgpu_vm *vm) in amdgpu_vm_handle_moved() argument
1983 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_vm_handle_moved()
1990 spin_lock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
1991 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1992 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1995 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2010 spin_lock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
2012 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_handle_moved()
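
amdgpu_vm_handle_moved (lines 1976-2012 above) drains two lists: vm->moved is walked with list_for_each_entry_safe, while vm->invalidated is popped one entry at a time with invalidated_lock dropped around the actual mapping update and retaken before the next list_empty() check. A sketch of that lock-dropping drain, again using the demo types from the first sketch; here the entry is simply detached, whereas the real code re-files it through the state helpers after updating it.

static void demo_handle_invalidated(struct demo_vm *vm)
{
	struct demo_vm_bo *vm_bo;

	spin_lock(&vm->invalidated_lock);
	while (!list_empty(&vm->invalidated)) {
		vm_bo = list_first_entry(&vm->invalidated,
					 struct demo_vm_bo, vm_status);
		list_del_init(&vm_bo->vm_status);
		spin_unlock(&vm->invalidated_lock);

		/* ... rebuild this BO's mappings without holding the lock ... */

		spin_lock(&vm->invalidated_lock);
	}
	spin_unlock(&vm->invalidated_lock);
}
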
2033 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
2042 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
2075 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
2080 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
2085 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv && in amdgpu_vm_bo_insert_map()
2087 list_move(&bo_va->base.vm_status, &vm->moved); in amdgpu_vm_bo_insert_map()
2116 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
2133 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
2200 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
2238 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
2261 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
2266 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
2268 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
2288 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
2313 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
2344 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
2353 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
2359 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
2368 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
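
amdgpu_vm_bo_clear_mappings (lines 2288-2368 above) punches a hole [saddr, eaddr] into the mapping set: each overlapping mapping is removed from the vm->va interval tree, and any part that extends beyond the hole is re-inserted as a "before" and/or "after" remainder. The range arithmetic behind that split is shown in isolation below; the demo types are invented, and inclusive bounds are an assumption based on the iter_first(saddr, eaddr) call sites above.

/* Returns a bitmask: bit 0 set if *before is valid, bit 1 if *after is. */
struct demo_range { uint64_t start, last; };   /* inclusive bounds assumed */

static unsigned int demo_split_mapping(struct demo_range map,
				       uint64_t saddr, uint64_t eaddr,
				       struct demo_range *before,
				       struct demo_range *after)
{
	unsigned int pieces = 0;

	if (map.last < saddr || map.start > eaddr)
		return 0;                       /* no overlap, mapping untouched */

	if (map.start < saddr) {                /* remainder left of the hole */
		before->start = map.start;
		before->last  = saddr - 1;
		pieces |= 1;
	}
	if (map.last > eaddr) {                 /* remainder right of the hole */
		after->start = eaddr + 1;
		after->last  = map.last;
		pieces |= 2;
	}
	return pieces;
}

When neither bit is set the mapping fell entirely inside the cleared range and, as at line 2353 above, only needs to be queued on vm->freed for teardown.
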
2390 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
2393 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
2404 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
2411 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
2441 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_rmv() local
2445 if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) in amdgpu_vm_bo_rmv()
2446 vm->bulk_moveable = false; in amdgpu_vm_bo_rmv()
2458 spin_lock(&vm->invalidated_lock); in amdgpu_vm_bo_rmv()
2460 spin_unlock(&vm->invalidated_lock); in amdgpu_vm_bo_rmv()
2464 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_rmv()
2467 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_rmv()
2471 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_rmv()
2472 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_rmv()
2507 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
2509 if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { in amdgpu_vm_bo_invalidate()
2520 else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) in amdgpu_vm_bo_invalidate()
2648 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2650 return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2667 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_init() argument
2674 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2676 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2677 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2678 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2679 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2680 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2681 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2682 spin_lock_init(&vm->invalidated_lock); in amdgpu_vm_init()
2683 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2686 r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs, in amdgpu_vm_init()
2691 vm->pte_support_ats = false; in amdgpu_vm_init()
2694 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2698 vm->pte_support_ats = true; in amdgpu_vm_init()
2700 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2704 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2705 WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), in amdgpu_vm_init()
2708 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2709 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2711 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2712 vm->last_update = NULL; in amdgpu_vm_init()
2714 amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp); in amdgpu_vm_init()
2729 amdgpu_vm_bo_base_init(&vm->root.base, vm, root); in amdgpu_vm_init()
2731 r = amdgpu_vm_clear_bo(adev, vm, root); in amdgpu_vm_init()
2735 amdgpu_bo_unreserve(vm->root.base.bo); in amdgpu_vm_init()
2741 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, in amdgpu_vm_init()
2747 vm->pasid = pasid; in amdgpu_vm_init()
2750 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2755 amdgpu_bo_unreserve(vm->root.base.bo); in amdgpu_vm_init()
2758 amdgpu_bo_unref(&vm->root.base.bo->shadow); in amdgpu_vm_init()
2759 amdgpu_bo_unref(&vm->root.base.bo); in amdgpu_vm_init()
2760 vm->root.base.bo = NULL; in amdgpu_vm_init()
2763 drm_sched_entity_destroy(&vm->entity); in amdgpu_vm_init()
2782 struct amdgpu_vm *vm) in amdgpu_vm_check_clean_reserved() argument
2788 if (!(vm->root.entries)) in amdgpu_vm_check_clean_reserved()
2792 if (vm->root.entries[i].base.bo) in amdgpu_vm_check_clean_reserved()
2819 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) in amdgpu_vm_make_compute() argument
2824 r = amdgpu_bo_reserve(vm->root.base.bo, true); in amdgpu_vm_make_compute()
2829 r = amdgpu_vm_check_clean_reserved(adev, vm); in amdgpu_vm_make_compute()
2837 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, in amdgpu_vm_make_compute()
2849 if (pte_support_ats != vm->pte_support_ats) { in amdgpu_vm_make_compute()
2850 vm->pte_support_ats = pte_support_ats; in amdgpu_vm_make_compute()
2851 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo); in amdgpu_vm_make_compute()
2857 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2860 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2861 WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), in amdgpu_vm_make_compute()
2864 if (vm->use_cpu_for_update) in amdgpu_vm_make_compute()
2865 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2867 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2868 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2869 vm->last_update = NULL; in amdgpu_vm_make_compute()
2871 if (vm->pasid) { in amdgpu_vm_make_compute()
2875 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); in amdgpu_vm_make_compute()
2881 amdgpu_pasid_free(vm->pasid); in amdgpu_vm_make_compute()
2882 vm->pasid = 0; in amdgpu_vm_make_compute()
2886 amdgpu_bo_unref(&vm->root.base.bo->shadow); in amdgpu_vm_make_compute()
2889 vm->pasid = pasid; in amdgpu_vm_make_compute()
2902 amdgpu_bo_unreserve(vm->root.base.bo); in amdgpu_vm_make_compute()
2914 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_release_compute() argument
2916 if (vm->pasid) { in amdgpu_vm_release_compute()
2920 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); in amdgpu_vm_release_compute()
2923 vm->pasid = 0; in amdgpu_vm_release_compute()
2935 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
2942 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
2944 if (vm->pasid) { in amdgpu_vm_fini()
2948 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); in amdgpu_vm_fini()
2952 drm_sched_entity_destroy(&vm->entity); in amdgpu_vm_fini()
2954 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2958 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2965 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2967 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
2972 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
2975 root = amdgpu_bo_ref(vm->root.base.bo); in amdgpu_vm_fini()
2980 amdgpu_vm_free_pts(adev, vm, NULL); in amdgpu_vm_fini()
2984 WARN_ON(vm->root.base.bo); in amdgpu_vm_fini()
2985 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2987 amdgpu_vmid_free_reserved(adev, vm, i); in amdgpu_vm_fini()
3069 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); in amdgpu_vm_ioctl()
3074 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); in amdgpu_vm_ioctl()
3093 struct amdgpu_vm *vm; in amdgpu_vm_get_task_info() local
3098 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); in amdgpu_vm_get_task_info()
3099 if (vm) in amdgpu_vm_get_task_info()
3100 *task_info = vm->task_info; in amdgpu_vm_get_task_info()
3110 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
3112 if (!vm->task_info.pid) { in amdgpu_vm_set_task_info()
3113 vm->task_info.pid = current->pid; in amdgpu_vm_set_task_info()
3114 get_task_comm(vm->task_info.task_name, current); in amdgpu_vm_set_task_info()
3117 vm->task_info.tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
3118 get_task_comm(vm->task_info.process_name, current->group_leader); in amdgpu_vm_set_task_info()