Lines Matching full:vm
14 struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) in alloc_pt_dma() argument
16 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) in alloc_pt_dma()
17 i915_gem_shrink_all(vm->i915); in alloc_pt_dma()
19 return i915_gem_object_create_internal(vm->i915, sz); in alloc_pt_dma()
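The three matched lines above are enough to reassemble alloc_pt_dma() almost whole. A minimal sketch, put together only from those lines (and assuming the usual i915 GTT declarations, e.g. "intel_gtt.h", are in scope, as for all the sketches below):

/* Sketch assembled from the matched lines; not claimed to be verbatim. */
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
        /* Selftest-only fault injection: simulate memory pressure by
         * shrinking everything before the allocation. */
        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);

        /* Page-table memory is backed by an internal GEM object. */
        return i915_gem_object_create_internal(vm->i915, sz);
}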
22 int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) in pin_pt_dma() argument
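Only the prototype of pin_pt_dma() is matched, so any body is a guess. A plausible (hypothetical) one would simply pin the object's backing pages so the page-table memory stays resident:

/* Hypothetical body -- the listing shows only the prototype. */
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
        /* Keep the backing store resident while the vm references it. */
        return i915_gem_object_pin_pages(obj);
}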
34 void __i915_vm_close(struct i915_address_space *vm) in __i915_vm_close() argument
38 if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex)) in __i915_vm_close()
41 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { in __i915_vm_close()
54 GEM_BUG_ON(!list_empty(&vm->bound_list)); in __i915_vm_close()
56 mutex_unlock(&vm->mutex); in __i915_vm_close()
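These matches outline the close path: the last opener drops vm->open, takes vm->mutex, and drains vm->bound_list. The per-VMA teardown inside the loop is not matched, so it is left as a placeholder in this sketch:

/* Sketch; the loop body is elided because the listing does not show it. */
void __i915_vm_close(struct i915_address_space *vm)
{
        struct i915_vma *vma, *vn;

        /* Only the final open reference proceeds, and it holds vm->mutex. */
        if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
                return;

        list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
                /* Per-VMA unbind/teardown elided here; whatever it does must
                 * remove the VMA from vm->bound_list, or the assertion below
                 * would trip. */
        }
        GEM_BUG_ON(!list_empty(&vm->bound_list));

        mutex_unlock(&vm->mutex);
}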
59 void i915_address_space_fini(struct i915_address_space *vm) in i915_address_space_fini() argument
61 drm_mm_takedown(&vm->mm); in i915_address_space_fini()
62 mutex_destroy(&vm->mutex); in i915_address_space_fini()
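i915_address_space_fini() appears to be the symmetric teardown for the init further down: release the drm_mm range manager and destroy the mutex. A sketch from the two matched lines:

/* Sketch from the matched lines. */
void i915_address_space_fini(struct i915_address_space *vm)
{
        drm_mm_takedown(&vm->mm);       /* the range manager must be empty */
        mutex_destroy(&vm->mutex);
}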
67 struct i915_address_space *vm = in __i915_vm_release() local
70 vm->cleanup(vm); in __i915_vm_release()
71 i915_address_space_fini(vm); in __i915_vm_release()
73 kfree(vm); in __i915_vm_release()
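The vm local in __i915_vm_release() is the address space recovered from its RCU work item; that container_of() step is not matched, but it follows from INIT_RCU_WORK(&vm->rcu, __i915_vm_release) in i915_address_space_init(). A sketch under that assumption:

/* Sketch; recovering vm via container_of() is inferred, not matched. */
static void __i915_vm_release(struct work_struct *work)
{
        struct i915_address_space *vm =
                container_of(work, struct i915_address_space, rcu.work);

        vm->cleanup(vm);                /* backend-specific teardown hook */
        i915_address_space_fini(vm);    /* drm_mm + mutex, as sketched above */

        kfree(vm);
}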
78 struct i915_address_space *vm = in i915_vm_release() local
81 GEM_BUG_ON(i915_is_ggtt(vm)); in i915_vm_release()
82 trace_i915_ppgtt_release(vm); in i915_vm_release()
84 queue_rcu_work(vm->i915->wq, &vm->rcu); in i915_vm_release()
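i915_vm_release() is the kref release callback: it recovers the vm from its embedded kref (an inferred step, matching kref_init(&vm->ref) below), asserts it is not the GGTT, and defers the real teardown to the RCU work sketched above. A sketch:

/* Sketch; the container_of() recovery from the kref is inferred. */
void i915_vm_release(struct kref *kref)
{
        struct i915_address_space *vm =
                container_of(kref, struct i915_address_space, ref);

        GEM_BUG_ON(i915_is_ggtt(vm));   /* the GGTT is never released here */
        trace_i915_ppgtt_release(vm);

        /* Actual teardown runs in __i915_vm_release() after an RCU grace
         * period, on the driver workqueue. */
        queue_rcu_work(vm->i915->wq, &vm->rcu);
}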
87 void i915_address_space_init(struct i915_address_space *vm, int subclass) in i915_address_space_init() argument
89 kref_init(&vm->ref); in i915_address_space_init()
90 INIT_RCU_WORK(&vm->rcu, __i915_vm_release); in i915_address_space_init()
91 atomic_set(&vm->open, 1); in i915_address_space_init()
94 * The vm->mutex must be reclaim safe (for use in the shrinker). in i915_address_space_init()
98 mutex_init(&vm->mutex); in i915_address_space_init()
99 lockdep_set_subclass(&vm->mutex, subclass); in i915_address_space_init()
100 i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); in i915_address_space_init()
102 GEM_BUG_ON(!vm->total); in i915_address_space_init()
103 drm_mm_init(&vm->mm, 0, vm->total); in i915_address_space_init()
104 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; in i915_address_space_init()
106 INIT_LIST_HEAD(&vm->bound_list); in i915_address_space_init()
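The init matches cover nearly the whole constructor: refcounting, the deferred-release work, the open count, the reclaim-safe mutex, and the drm_mm range manager sized to vm->total. Assembled into one sketch:

/* Sketch assembled from the matched lines. */
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
        kref_init(&vm->ref);
        INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
        atomic_set(&vm->open, 1);       /* creator holds the first open */

        /*
         * The vm->mutex must be reclaim safe (for use in the shrinker),
         * which is what i915_gem_shrinker_taints_mutex() teaches lockdep.
         */
        mutex_init(&vm->mutex);
        lockdep_set_subclass(&vm->mutex, subclass);
        i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);

        GEM_BUG_ON(!vm->total);
        drm_mm_init(&vm->mm, 0, vm->total);
        vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

        INIT_LIST_HEAD(&vm->bound_list);
}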
165 int setup_scratch_page(struct i915_address_space *vm) in setup_scratch_page() argument
177 * scratch (read-only) between all vm, we create one 64k scratch page in setup_scratch_page()
181 if (i915_vm_is_4lvl(vm) && in setup_scratch_page()
182 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) in setup_scratch_page()
188 obj = vm->alloc_pt_dma(vm, size); in setup_scratch_page()
192 if (pin_pt_dma(vm, obj)) in setup_scratch_page()
214 vm->scratch[0] = obj; in setup_scratch_page()
215 vm->scratch_order = get_order(size); in setup_scratch_page()
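setup_scratch_page() picks a 64K scratch page when the vm uses 4-level page tables and the platform supports 64K GTT pages (per the matched comment), otherwise 4K, then allocates and pins it via the helpers above. The error handling and the page-fill/retry logic are not matched, so they are simplified assumptions here:

/* Simplified sketch; error paths and scratch-page fill/retry are assumed. */
int setup_scratch_page(struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj;
        int size = I915_GTT_PAGE_SIZE_4K;

        /* Share one 64K read-only scratch page across the vm when the
         * page-table layout and platform page sizes allow it. */
        if (i915_vm_is_4lvl(vm) &&
            HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
                size = I915_GTT_PAGE_SIZE_64K;

        obj = vm->alloc_pt_dma(vm, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (pin_pt_dma(vm, obj)) {
                i915_gem_object_put(obj);
                return -ENOMEM;
        }

        vm->scratch[0] = obj;
        vm->scratch_order = get_order(size);
        return 0;
}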
228 void free_scratch(struct i915_address_space *vm) in free_scratch() argument
232 for (i = 0; i <= vm->top; i++) in free_scratch()
233 i915_gem_object_put(vm->scratch[i]); in free_scratch()
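free_scratch() is the counterpart: one scratch object exists per page-table level, indices 0 through vm->top, and each gets its reference dropped. A sketch from the matched loop:

/* Sketch from the matched lines. */
void free_scratch(struct i915_address_space *vm)
{
        int i;

        for (i = 0; i <= vm->top; i++)
                i915_gem_object_put(vm->scratch[i]);
}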