Lines Matching full:vm
37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix
42 * Each VM has an ID associated with it and there is a page table
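These first hits are from the file's overview comment: unlike the single system page table behind VMID 0, each process gets its own VM with its own id and page table. Below is a reduced sketch of the per-process state the matches in this listing operate on; the field subset and comments are inferred from the hits here, and the authoritative definition is struct radeon_vm in radeon.h.

#include "radeon.h"

/* Sketch only: a trimmed view of the per-process VM bookkeeping touched by
 * the hits below.  Field order and omitted members are assumptions; see
 * struct radeon_vm in drivers/gpu/drm/radeon/radeon.h for the real thing. */
struct radeon_vm_sketch {
	struct mutex		mutex;		/* protects page table updates */
	struct rb_root_cached	va;		/* interval tree of bo_va mappings */
	spinlock_t		status_lock;	/* protects the lists below */
	struct list_head	invalidated;	/* mappings needing a PTE rewrite */
	struct list_head	freed;		/* mappings waiting to be unmapped */
	struct list_head	cleared;	/* mappings that are up to date */
	struct radeon_bo	*page_directory;	/* top-level page directory BO */
	struct radeon_vm_pt	*page_tables;	/* one BO per page directory entry */
	unsigned		max_pde_used;	/* highest PDE currently in use */
	struct radeon_vm_id	ids[RADEON_NUM_RINGS];	/* per-ring VMID + fences */
	struct radeon_bo_va	*ib_bo_va;	/* mapping of the temporary IB buffer */
};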
78 * radeon_vm_manager_init - init the vm manager
82 * Init the vm manager (cayman+).
100 * radeon_vm_manager_fini - tear down the vm manager
104 * Tear down the VM manager (cayman+).
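radeon_vm_manager_init()/radeon_vm_manager_fini() bracket the whole VM block at the device level. A minimal sketch of the pairing, assuming the usual ASIC startup/teardown placement; the example_* helpers are illustrative, not driver functions.

#include "radeon.h"

/* Sketch: where the manager init/fini pair fits in device bring-up.  In the
 * real driver this lives in the per-ASIC startup/fini paths (cayman+). */
static int example_vm_manager_bringup(struct radeon_device *rdev)
{
	int r;

	r = radeon_vm_manager_init(rdev);	/* set up VMID bookkeeping */
	if (r)
		return r;
	/* ... rings, IB pool, etc. ... */
	return 0;
}

static void example_vm_manager_teardown(struct radeon_device *rdev)
{
	radeon_vm_manager_fini(rdev);		/* drop VMID fences, disable VM */
}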
120 * radeon_vm_get_bos - add the vm BOs to a validation list
123 * @vm: vm providing the BOs
130 struct radeon_vm *vm, in radeon_vm_get_bos() argument
136 list = kvmalloc_array(vm->max_pde_used + 2, in radeon_vm_get_bos()
141 /* add the vm page table to the list */ in radeon_vm_get_bos()
142 list[0].robj = vm->page_directory; in radeon_vm_get_bos()
145 list[0].tv.bo = &vm->page_directory->tbo; in radeon_vm_get_bos()
150 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { in radeon_vm_get_bos()
151 if (!vm->page_tables[i].bo) in radeon_vm_get_bos()
154 list[idx].robj = vm->page_tables[i].bo; in radeon_vm_get_bos()
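radeon_vm_get_bos() builds the array the command-submission code uses to get the page directory plus every allocated page table BO onto its reservation list. A minimal usage sketch modeled on that path; the helper name and the trimmed error handling are mine.

#include "radeon.h"

/* Sketch: pull the VM's page directory and page table BOs onto a validation
 * list before command submission.  The returned array is kvmalloc'ed and has
 * to be kvfree'd by the caller once the submission is done. */
static int example_validate_vm_bos(struct radeon_device *rdev,
				   struct radeon_vm *vm,
				   struct list_head *validated)
{
	struct radeon_bo_list *vm_bos;

	vm_bos = radeon_vm_get_bos(rdev, vm, validated);
	if (!vm_bos)
		return -ENOMEM;

	/* ... reserve and validate everything now linked on @validated ... */

	kvfree(vm_bos);
	return 0;
}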
170 * @vm: vm to allocate id for
173 * Allocate an id for the vm (cayman+).
179 struct radeon_vm *vm, int ring) in radeon_vm_grab_id() argument
182 struct radeon_vm_id *vm_id = &vm->ids[ring]; in radeon_vm_grab_id()
195 /* skip over VMID 0, since it is the system VM */ in radeon_vm_grab_id()
226 * radeon_vm_flush - hardware flush the vm
229 * @vm: vm we want to flush
231 * @updates: last vm update that is waited for
233 * Flush the vm (cayman+).
238 struct radeon_vm *vm, in radeon_vm_flush() argument
241 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); in radeon_vm_flush()
242 struct radeon_vm_id *vm_id = &vm->ids[ring]; in radeon_vm_flush()
247 trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id); in radeon_vm_flush()
258 * radeon_vm_fence - remember fence for vm
261 * @vm: vm we want to fence
264 * Fence the vm (cayman+).
270 struct radeon_vm *vm, in radeon_vm_fence() argument
273 unsigned vm_id = vm->ids[fence->ring].id; in radeon_vm_fence()
278 radeon_fence_unref(&vm->ids[fence->ring].last_id_use); in radeon_vm_fence()
279 vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence); in radeon_vm_fence()
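radeon_vm_grab_id(), radeon_vm_flush() and radeon_vm_fence() form the per-submission sequence: reserve a hardware VMID, emit the page-directory switch and TLB flush on the ring, then record which fence last used that id. A condensed sketch of that order, based on the IB scheduling path with synchronization details trimmed.

#include "radeon.h"

/* Sketch: per-IB VM handling, condensed from the IB scheduling path.
 * @ib->vm is the process VM, @ib->ring the ring the IB will run on. */
static void example_vm_run_ib(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_fence *vm_id_fence;

	/* reserve a VMID (never 0, that one is the system VM) and get the
	 * fence that must complete before the id may be reused */
	vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
	radeon_sync_fence(&ib->sync, vm_id_fence);

	/* emit the page directory base switch + TLB flush for that VMID */
	radeon_vm_flush(rdev, ib->vm, ib->ring, ib->sync.last_vm_update);

	/* ... emit the IB itself and its fence ... */

	/* remember that this fence was the last user of the VMID */
	radeon_vm_fence(rdev, ib->vm, ib->fence);
}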
283 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
285 * @vm: requested vm
288 * Find @bo inside the requested vm (cayman+).
289 * Search inside the @bo's vm list for the requested vm
294 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, in radeon_vm_bo_find() argument
300 if (bo_va->vm == vm) in radeon_vm_bo_find()
308 * radeon_vm_bo_add - add a bo to a specific vm
311 * @vm: requested vm
314 * Add @bo into the requested vm (cayman+).
315 * Add @bo to the list of bos associated with the vm
321 struct radeon_vm *vm, in radeon_vm_bo_add() argument
330 bo_va->vm = vm; in radeon_vm_bo_add()
339 mutex_lock(&vm->mutex); in radeon_vm_bo_add()
341 mutex_unlock(&vm->mutex); in radeon_vm_bo_add()
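radeon_vm_bo_find() and radeon_vm_bo_add() are normally used together when a BO first shows up in a client's VM: look up an existing bo_va, and create one only if none exists. A small sketch of that pattern; the helper is illustrative and the BO is assumed to be reserved by the caller.

#include "radeon.h"

/* Sketch: make sure @bo has a bo_va in @vm, creating one if needed.
 * Caller is assumed to hold the BO reservation, mirroring the GEM open path. */
static struct radeon_bo_va *example_get_or_add_bo_va(struct radeon_device *rdev,
						      struct radeon_vm *vm,
						      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va)
		return bo_va;			/* already tracked in this VM */

	return radeon_vm_bo_add(rdev, vm, bo);	/* NULL on allocation failure */
}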
434 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
438 * @soffset: requested offset of the buffer in the VM address space
442 * Validate and set the offset requested within the vm address space.
453 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_set_addr() local
478 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
483 it = interval_tree_iter_first(&vm->va, soffset, eoffset); in radeon_vm_bo_set_addr()
491 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
502 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
508 tmp->vm = vm; in radeon_vm_bo_set_addr()
511 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_bo_set_addr()
512 spin_lock(&vm->status_lock); in radeon_vm_bo_set_addr()
516 list_add(&tmp->vm_status, &vm->freed); in radeon_vm_bo_set_addr()
517 spin_unlock(&vm->status_lock); in radeon_vm_bo_set_addr()
521 spin_lock(&vm->status_lock); in radeon_vm_bo_set_addr()
524 list_add(&bo_va->vm_status, &vm->cleared); in radeon_vm_bo_set_addr()
525 spin_unlock(&vm->status_lock); in radeon_vm_bo_set_addr()
526 interval_tree_insert(&bo_va->it, &vm->va); in radeon_vm_bo_set_addr()
536 if (eoffset > vm->max_pde_used) in radeon_vm_bo_set_addr()
537 vm->max_pde_used = eoffset; in radeon_vm_bo_set_addr()
545 if (vm->page_tables[pt_idx].bo) in radeon_vm_bo_set_addr()
549 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
565 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
566 if (vm->page_tables[pt_idx].bo) { in radeon_vm_bo_set_addr()
568 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
570 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
574 vm->page_tables[pt_idx].addr = 0; in radeon_vm_bo_set_addr()
575 vm->page_tables[pt_idx].bo = pt; in radeon_vm_bo_set_addr()
578 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
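radeon_vm_bo_set_addr() gives a bo_va its virtual range: the requested window is checked against the interval tree for overlaps, any previous mapping of the bo_va is queued on the freed list, and page table BOs covering the new range are allocated on demand. A minimal sketch of mapping and later unmapping a BO; the address and flag choice are illustrative, the RADEON_VM_PAGE_* flags come from the radeon UAPI header, and soffset == 0 is the unmap case.

#include "radeon.h"
#include <drm/radeon_drm.h>

/* Sketch: give @bo_va a GPU virtual address, then drop the mapping again.
 * @gpu_va is an example address chosen by the caller; the BO is assumed to
 * be reserved around both calls. */
static int example_map_and_unmap(struct radeon_device *rdev,
				 struct radeon_bo_va *bo_va,
				 uint64_t gpu_va)
{
	int r;

	r = radeon_vm_bo_set_addr(rdev, bo_va, gpu_va,
				  RADEON_VM_PAGE_READABLE |
				  RADEON_VM_PAGE_WRITEABLE |
				  RADEON_VM_PAGE_SNOOPED);
	if (r)
		return r;

	/* ... use the mapping; PTEs are actually filled by bo_update ... */

	/* soffset == 0 tears the mapping down again */
	return radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
}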
632 * @vm: requested vm
641 struct radeon_vm *vm) in radeon_vm_update_page_directory() argument
643 struct radeon_bo *pd = vm->page_directory; in radeon_vm_update_page_directory()
655 ndw += vm->max_pde_used * 6; in radeon_vm_update_page_directory()
667 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { in radeon_vm_update_page_directory()
668 struct radeon_bo *bo = vm->page_tables[pt_idx].bo; in radeon_vm_update_page_directory()
675 if (vm->page_tables[pt_idx].addr == pt) in radeon_vm_update_page_directory()
677 vm->page_tables[pt_idx].addr = pt; in radeon_vm_update_page_directory()
803 * @vm: requested vm
815 struct radeon_vm *vm, in radeon_vm_update_ptes() argument
828 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; in radeon_vm_update_ptes()
877 * @vm: requested vm
886 static void radeon_vm_fence_pts(struct radeon_vm *vm, in radeon_vm_fence_pts() argument
896 radeon_bo_fence(vm->page_tables[i].bo, fence, true); in radeon_vm_fence_pts()
900 * radeon_vm_bo_update - map a bo into the vm page table
915 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_update() local
923 dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n", in radeon_vm_bo_update()
924 bo_va->bo, vm); in radeon_vm_bo_update()
928 spin_lock(&vm->status_lock); in radeon_vm_bo_update()
931 spin_unlock(&vm->status_lock); in radeon_vm_bo_update()
937 list_add(&bo_va->vm_status, &vm->cleared); in radeon_vm_bo_update()
939 spin_unlock(&vm->status_lock); in radeon_vm_bo_update()
1008 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use); in radeon_vm_bo_update()
1011 r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, in radeon_vm_bo_update()
1028 radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence); in radeon_vm_bo_update()
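radeon_vm_update_page_directory() rewrites PDEs whose page table moved, while radeon_vm_bo_update() (through the static radeon_vm_update_ptes()/radeon_vm_fence_pts() helpers) fills the PTEs for one mapping and fences the page tables it touched. A condensed sketch of the order the CS path runs the whole update in; note that the BO placement field is named tbo.mem on the kernel vintage of these hits and tbo.resource on newer trees.

#include "radeon.h"

/* Sketch: bring one VM's page tables up to date before a submission.
 * Condensed from the CS path; locking, sync and error paths are trimmed. */
static int example_update_vm(struct radeon_device *rdev,
			     struct radeon_vm *vm,
			     struct radeon_bo_va *bo_va)
{
	int r;

	/* write any page directory entries that changed */
	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	/* unmap everything queued on the freed list */
	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	/* fill the PTEs for this mapping from the BO's current placement
	 * (tbo.mem here; called tbo.resource on newer kernels) */
	r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
	if (r)
		return r;

	/* re-map anything whose backing store moved since the last submit */
	return radeon_vm_clear_invalids(rdev, vm);
}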
1040 * @vm: requested vm
1048 struct radeon_vm *vm) in radeon_vm_clear_freed() argument
1053 spin_lock(&vm->status_lock); in radeon_vm_clear_freed()
1054 while (!list_empty(&vm->freed)) { in radeon_vm_clear_freed()
1055 bo_va = list_first_entry(&vm->freed, in radeon_vm_clear_freed()
1057 spin_unlock(&vm->status_lock); in radeon_vm_clear_freed()
1062 spin_lock(&vm->status_lock); in radeon_vm_clear_freed()
1069 spin_unlock(&vm->status_lock); in radeon_vm_clear_freed()
1078 * @vm: requested vm
1086 struct radeon_vm *vm) in radeon_vm_clear_invalids() argument
1091 spin_lock(&vm->status_lock); in radeon_vm_clear_invalids()
1092 while (!list_empty(&vm->invalidated)) { in radeon_vm_clear_invalids()
1093 bo_va = list_first_entry(&vm->invalidated, in radeon_vm_clear_invalids()
1095 spin_unlock(&vm->status_lock); in radeon_vm_clear_invalids()
1101 spin_lock(&vm->status_lock); in radeon_vm_clear_invalids()
1103 spin_unlock(&vm->status_lock); in radeon_vm_clear_invalids()
1109 * radeon_vm_bo_rmv - remove a bo from a specific vm
1114 * Remove @bo_va->bo from the requested vm (cayman+).
1121 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_rmv() local
1125 mutex_lock(&vm->mutex); in radeon_vm_bo_rmv()
1127 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_bo_rmv()
1129 spin_lock(&vm->status_lock); in radeon_vm_bo_rmv()
1133 list_add(&bo_va->vm_status, &vm->freed); in radeon_vm_bo_rmv()
1138 spin_unlock(&vm->status_lock); in radeon_vm_bo_rmv()
1140 mutex_unlock(&vm->mutex); in radeon_vm_bo_rmv()
1157 spin_lock(&bo_va->vm->status_lock); in radeon_vm_bo_invalidate()
1160 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); in radeon_vm_bo_invalidate()
1161 spin_unlock(&bo_va->vm->status_lock); in radeon_vm_bo_invalidate()
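radeon_vm_bo_rmv() and radeon_vm_bo_invalidate() feed the deferred-work lists seen in the hits above: removing a still-mapped bo_va parks it on vm->freed (cleaned up later by radeon_vm_clear_freed()), while a BO move parks its bo_vas on the per-VM invalidated lists (remapped by radeon_vm_clear_invalids()). A small sketch of the removal side, modeled on the GEM close path with the page-table reservation omitted.

#include "radeon.h"

/* Sketch: drop @bo's mapping from @vm when the handle is closed.  The BO has
 * to be reserved around the bo_va manipulation; the real driver also reserves
 * the VM page tables here, which this sketch leaves out. */
static void example_close_bo_in_vm(struct radeon_device *rdev,
				   struct radeon_vm *vm,
				   struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	if (radeon_bo_reserve(bo, false))
		return;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va)
		radeon_vm_bo_rmv(rdev, bo_va);	/* queued on vm->freed if mapped */

	radeon_bo_unreserve(bo);
}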
1166 * radeon_vm_init - initialize a vm instance
1169 * @vm: requested vm
1171 * Init @vm fields (cayman+).
1173 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) in radeon_vm_init() argument
1180 vm->ib_bo_va = NULL; in radeon_vm_init()
1182 vm->ids[i].id = 0; in radeon_vm_init()
1183 vm->ids[i].flushed_updates = NULL; in radeon_vm_init()
1184 vm->ids[i].last_id_use = NULL; in radeon_vm_init()
1186 mutex_init(&vm->mutex); in radeon_vm_init()
1187 vm->va = RB_ROOT_CACHED; in radeon_vm_init()
1188 spin_lock_init(&vm->status_lock); in radeon_vm_init()
1189 INIT_LIST_HEAD(&vm->invalidated); in radeon_vm_init()
1190 INIT_LIST_HEAD(&vm->freed); in radeon_vm_init()
1191 INIT_LIST_HEAD(&vm->cleared); in radeon_vm_init()
1198 vm->page_tables = kzalloc(pts_size, GFP_KERNEL); in radeon_vm_init()
1199 if (vm->page_tables == NULL) { in radeon_vm_init()
1206 NULL, &vm->page_directory); in radeon_vm_init()
1210 r = radeon_vm_clear_bo(rdev, vm->page_directory); in radeon_vm_init()
1212 radeon_bo_unref(&vm->page_directory); in radeon_vm_init()
1213 vm->page_directory = NULL; in radeon_vm_init()
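radeon_vm_init() runs once per DRM file open: it clears the per-ring id state, initializes the interval tree and status lists, allocates the page table array and the page directory BO, and clears the directory. A condensed sketch of the open-time pattern, including the driver's mapping of its temporary IB buffer into every new VM; error unwinding is trimmed.

#include "radeon.h"
#include <drm/radeon_drm.h>

/* Sketch: per-open VM setup, condensed from the KMS open path.
 * Error unwinding and the accel_working checks are omitted. */
static int example_open_vm(struct radeon_device *rdev, struct radeon_vm *vm)
{
	int r;

	r = radeon_vm_init(rdev, vm);
	if (r)
		return r;

	/* map the shared temporary IB buffer read-only at a fixed VA */
	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
	if (r)
		return r;

	vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
	if (!vm->ib_bo_va) {
		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		return -ENOMEM;
	}

	r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, RADEON_VA_IB_OFFSET,
				  RADEON_VM_PAGE_READABLE |
				  RADEON_VM_PAGE_SNOOPED);
	radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
	return r;
}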
1221 * radeon_vm_fini - tear down a vm instance
1224 * @vm: requested vm
1226 * Tear down @vm (cayman+).
1227 * Unbind the VM and remove all bos from the vm bo list
1229 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) in radeon_vm_fini() argument
1234 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) in radeon_vm_fini()
1235 dev_err(rdev->dev, "still active bo inside vm\n"); in radeon_vm_fini()
1238 &vm->va.rb_root, it.rb) { in radeon_vm_fini()
1239 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_fini()
1248 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { in radeon_vm_fini()
1255 radeon_bo_unref(&vm->page_tables[i].bo); in radeon_vm_fini()
1256 kfree(vm->page_tables); in radeon_vm_fini()
1258 radeon_bo_unref(&vm->page_directory); in radeon_vm_fini()
1261 radeon_fence_unref(&vm->ids[i].flushed_updates); in radeon_vm_fini()
1262 radeon_fence_unref(&vm->ids[i].last_id_use); in radeon_vm_fini()
1265 mutex_destroy(&vm->mutex); in radeon_vm_fini()
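radeon_vm_fini() is the mirror at file close: it warns if mappings are still in the interval tree, frees leftover bo_vas from the freed list, then releases every page table BO, the page directory and the per-ring id fences. A short sketch of the close-time ordering, which removes the IB buffer mapping first so fini does not find it still active.

#include "radeon.h"

/* Sketch: per-close VM teardown, condensed from the KMS postclose path. */
static void example_close_vm(struct radeon_device *rdev, struct radeon_vm *vm)
{
	/* drop the temporary IB buffer mapping created at open time */
	if (vm->ib_bo_va &&
	    !radeon_bo_reserve(rdev->ring_tmp_bo.bo, false)) {
		radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
	}

	/* free page tables, page directory and per-ring id fences */
	radeon_vm_fini(rdev, vm);
}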