Lines matching the full identifier "vm" in the i915 GTT address-space code (drivers/gpu/drm/i915/gt/intel_gtt.c)
32 struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) in alloc_pt_lmem() argument
48 obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, in alloc_pt_lmem()
49 vm->lmem_pt_obj_flags); in alloc_pt_lmem()
51 * Ensure all paging structures for this vm share the same dma-resv in alloc_pt_lmem()
56 obj->base.resv = i915_vm_resv_get(vm); in alloc_pt_lmem()
57 obj->shares_resv_from = vm; in alloc_pt_lmem()
63 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) in alloc_pt_dma() argument
67 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) in alloc_pt_dma()
68 i915_gem_shrink_all(vm->i915); in alloc_pt_dma()
70 obj = i915_gem_object_create_internal(vm->i915, sz); in alloc_pt_dma()
72 * Ensure all paging structures for this vm share the same dma-resv in alloc_pt_dma()
77 obj->base.resv = i915_vm_resv_get(vm); in alloc_pt_dma()
78 obj->shares_resv_from = vm; in alloc_pt_dma()
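For context, here is a sketch of how the alloc_pt_dma() fragments above fit together. It is reconstructed from the upstream i915 driver, so exact details (error handling, object flags) may differ between kernel versions; the lmem variant is the same shape but allocates from local memory with vm->lmem_pt_obj_flags.

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	/* Selftests can inject allocation failures to exercise the shrinker. */
	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, so that one object_lock() covers them all.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}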
84 int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) in map_pt_dma() argument
89 type = i915_coherent_map_type(vm->i915, obj, true); in map_pt_dma()
98 int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj) in map_pt_dma_locked() argument
103 type = i915_coherent_map_type(vm->i915, obj, true); in map_pt_dma_locked()
135 * Delay the vm and vm mutex freeing until the in clear_vm_list()
138 i915_vm_resv_get(vma->vm); in clear_vm_list()
148 static void __i915_vm_close(struct i915_address_space *vm) in __i915_vm_close() argument
150 mutex_lock(&vm->mutex); in __i915_vm_close()
152 clear_vm_list(&vm->bound_list); in __i915_vm_close()
153 clear_vm_list(&vm->unbound_list); in __i915_vm_close()
156 GEM_BUG_ON(!list_empty(&vm->bound_list)); in __i915_vm_close()
157 GEM_BUG_ON(!list_empty(&vm->unbound_list)); in __i915_vm_close()
159 mutex_unlock(&vm->mutex); in __i915_vm_close()
162 /* lock the vm into the current ww, if we lock one, we lock all */
163 int i915_vm_lock_objects(struct i915_address_space *vm, in i915_vm_lock_objects() argument
166 if (vm->scratch[0]->base.resv == &vm->_resv) { in i915_vm_lock_objects()
167 return i915_gem_object_lock(vm->scratch[0], ww); in i915_vm_lock_objects()
169 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); in i915_vm_lock_objects()
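Because every paging structure of a vm shares the vm's dma-resv, locking one object locks them all, which is what i915_vm_lock_objects() exploits. A hypothetical caller (touch_vm_page_tables() is an illustrative name, not an upstream function), assuming the i915 ww-context helpers such as for_i915_gem_ww(), would look roughly like this:

static int touch_vm_page_tables(struct i915_address_space *vm)
{
	struct i915_gem_ww_ctx ww;
	int err;

	/* Retry loop: -EDEADLK from any lock triggers backoff and restart. */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_vm_lock_objects(vm, &ww);
		if (err)
			continue;

		/* ... operate on the vm's page tables under the shared resv ... */
	}

	return err;
}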
176 void i915_address_space_fini(struct i915_address_space *vm) in i915_address_space_fini() argument
178 drm_mm_takedown(&vm->mm); in i915_address_space_fini()
191 struct i915_address_space *vm = in i915_vm_resv_release() local
192 container_of(kref, typeof(*vm), resv_ref); in i915_vm_resv_release()
194 dma_resv_fini(&vm->_resv); in i915_vm_resv_release()
195 mutex_destroy(&vm->mutex); in i915_vm_resv_release()
197 kfree(vm); in i915_vm_resv_release()
202 struct i915_address_space *vm = in __i915_vm_release() local
205 __i915_vm_close(vm); in __i915_vm_release()
208 i915_vma_resource_bind_dep_sync_all(vm); in __i915_vm_release()
210 vm->cleanup(vm); in __i915_vm_release()
211 i915_address_space_fini(vm); in __i915_vm_release()
213 i915_vm_resv_put(vm); in __i915_vm_release()
218 struct i915_address_space *vm = in i915_vm_release() local
221 GEM_BUG_ON(i915_is_ggtt(vm)); in i915_vm_release()
222 trace_i915_ppgtt_release(vm); in i915_vm_release()
224 queue_work(vm->i915->wq, &vm->release_work); in i915_vm_release()
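The two release functions above show that teardown is deferred: the final reference can be dropped from contexts where the sleeping cleanup cannot run directly, so i915_vm_release() only queues vm->release_work and __i915_vm_release() does the actual work. A sketch of how the listed fragments combine, reconstructed from upstream (details may vary by kernel version):

static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	__i915_vm_close(vm);

	/* Synchronize async unbinds before tearing down the page tables. */
	i915_vma_resource_bind_dep_sync_all(vm);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_work(vm->i915->wq, &vm->release_work);
}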
227 void i915_address_space_init(struct i915_address_space *vm, int subclass) in i915_address_space_init() argument
229 kref_init(&vm->ref); in i915_address_space_init()
235 if (!kref_read(&vm->resv_ref)) in i915_address_space_init()
236 kref_init(&vm->resv_ref); in i915_address_space_init()
238 vm->pending_unbind = RB_ROOT_CACHED; in i915_address_space_init()
239 INIT_WORK(&vm->release_work, __i915_vm_release); in i915_address_space_init()
242 * The vm->mutex must be reclaim safe (for use in the shrinker). in i915_address_space_init()
246 mutex_init(&vm->mutex); in i915_address_space_init()
247 lockdep_set_subclass(&vm->mutex, subclass); in i915_address_space_init()
249 if (!intel_vm_no_concurrent_access_wa(vm->i915)) { in i915_address_space_init()
250 i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); in i915_address_space_init()
254 * which is allowed to allocate memory. This means &vm->mutex in i915_address_space_init()
260 mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_); in i915_address_space_init()
262 mutex_release(&vm->mutex.dep_map, _THIS_IP_); in i915_address_space_init()
264 dma_resv_init(&vm->_resv); in i915_address_space_init()
266 GEM_BUG_ON(!vm->total); in i915_address_space_init()
267 drm_mm_init(&vm->mm, 0, vm->total); in i915_address_space_init()
269 memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT, in i915_address_space_init()
270 ARRAY_SIZE(vm->min_alignment)); in i915_address_space_init()
272 if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) && in i915_address_space_init()
274 vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M; in i915_address_space_init()
275 vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M; in i915_address_space_init()
276 } else if (HAS_64K_PAGES(vm->i915)) { in i915_address_space_init()
277 vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K; in i915_address_space_init()
278 vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K; in i915_address_space_init()
281 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; in i915_address_space_init()
283 INIT_LIST_HEAD(&vm->bound_list); in i915_address_space_init()
284 INIT_LIST_HEAD(&vm->unbound_list); in i915_address_space_init()
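The comments around vm->mutex in i915_address_space_init() are about lockdep priming: the mutex may be taken from the shrinker (memory reclaim), so the dependency is recorded up front. The helper used on the common path, i915_gem_shrinker_taints_mutex(), does roughly the following; taint_mutex_for_reclaim() is an illustrative name, not the verbatim upstream implementation:

/* Teach lockdep that @mutex may be taken while reclaiming memory. */
static void taint_mutex_for_reclaim(struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);
	fs_reclaim_release(GFP_KERNEL);
}

The other branch (the mutex_acquire()/mutex_release() pair visible above) handles the VT-d workaround case, where memory may legitimately be allocated while holding vm->mutex, so the reclaim taint must be avoided.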
329 int setup_scratch_page(struct i915_address_space *vm) in setup_scratch_page() argument
341 * scratch (read-only) between all vm, we create one 64k scratch page in setup_scratch_page()
345 if (i915_vm_is_4lvl(vm) && in setup_scratch_page()
346 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) in setup_scratch_page()
352 obj = vm->alloc_scratch_dma(vm, size); in setup_scratch_page()
356 if (map_pt_dma(vm, obj)) in setup_scratch_page()
378 vm->scratch[0] = obj; in setup_scratch_page()
379 vm->scratch_order = get_order(size); in setup_scratch_page()
392 * local-memory pages for this vm, since the HW expects the in setup_scratch_page()
397 if (HAS_64K_PAGES(vm->i915)) in setup_scratch_page()
404 void free_scratch(struct i915_address_space *vm) in free_scratch() argument
408 if (!vm->scratch[0]) in free_scratch()
411 for (i = 0; i <= vm->top; i++) in free_scratch()
412 i915_gem_object_put(vm->scratch[i]); in free_scratch()
605 __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size) in __vm_create_scratch_for_read() argument
610 obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size)); in __vm_create_scratch_for_read()
616 vma = i915_vma_instance(obj, vm, NULL); in __vm_create_scratch_for_read()
626 __vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size) in __vm_create_scratch_for_read_pinned() argument
631 vma = __vm_create_scratch_for_read(vm, size); in __vm_create_scratch_for_read_pinned()
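Finally, the scratch-for-read helpers at the end of the listing combine roughly as follows. This is a reconstruction from upstream i915: the pinned variant builds on __vm_create_scratch_for_read() and picks PIN_GLOBAL or PIN_USER depending on whether the vma lives in the GGTT; exact flags may differ between kernel versions.

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm,
				    unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}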