Lines matching full:vm in drivers/gpu/drm/i915/gt/intel_ggtt.c
29 static inline bool suspend_retains_ptes(struct i915_address_space *vm) in suspend_retains_ptes() argument
31 return GRAPHICS_VER(vm->i915) >= 8 && in suspend_retains_ptes()
32 !HAS_LMEM(vm->i915) && in suspend_retains_ptes()
33 vm->is_ggtt; in suspend_retains_ptes()
57 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
59 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
61 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
64 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
67 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
73 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
111 static u64 read_last_pte(struct i915_address_space *vm) in read_last_pte() argument
113 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in read_last_pte()
116 if (!suspend_retains_ptes(vm)) in read_last_pte()
119 GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8); in read_last_pte()
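Taken together, suspend_retains_ptes() and read_last_pte() form a cheap integrity probe for platforms whose GGTT contents survive suspend (integrated gen8+ parts, global GTT only): i915_ggtt_suspend_vm() below stashes the last PTE in ggtt->probed_pte, and i915_ggtt_resume_vm() re-reads it, treating a mismatch as evidence that firmware clobbered the table. A minimal sketch of that handshake; sample_last_pte() is a hypothetical stand-in for read_last_pte():

    /* Illustrative only: sample_last_pte() and probed are stand-ins for
     * read_last_pte() and ggtt->probed_pte in the driver. */
    static u64 probed;

    static void sketch_suspend_probe(struct i915_address_space *vm)
    {
            probed = sample_last_pte(vm);   /* stash the final GGTT entry */
    }

    static bool sketch_ptes_survived(struct i915_address_space *vm)
    {
            /* Retained PTEs are trusted only if the probe reads back
             * unchanged after resume. */
            return sample_last_pte(vm) == probed;
    }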
125 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
126 * @vm: The VM to suspend the mappings for
131 void i915_ggtt_suspend_vm(struct i915_address_space *vm) in i915_ggtt_suspend_vm() argument
136 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_suspend_vm()
139 i915_gem_drain_freed_objects(vm->i915); in i915_ggtt_suspend_vm()
141 mutex_lock(&vm->mutex); in i915_ggtt_suspend_vm()
147 save_skip_rewrite = vm->skip_pte_rewrite; in i915_ggtt_suspend_vm()
148 vm->skip_pte_rewrite = true; in i915_ggtt_suspend_vm()
150 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { in i915_ggtt_suspend_vm()
166 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
173 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
187 if (!suspend_retains_ptes(vm)) in i915_ggtt_suspend_vm()
188 vm->clear_range(vm, 0, vm->total); in i915_ggtt_suspend_vm()
190 i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm); in i915_ggtt_suspend_vm()
192 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
194 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
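Filling in the elided lines, the suspend path has a fixed shape: drain freed objects, take vm->mutex, suppress PTE rewrites while unbinding every resident VMA, scrub the range only when the hardware will not retain it, then restore the flag. A hedged skeleton (the unbind loop body and its lock-contention retry at lines 166-173 are summarized, not verbatim):

    /* Hedged skeleton of i915_ggtt_suspend_vm(). */
    i915_gem_drain_freed_objects(vm->i915);
    mutex_lock(&vm->mutex);

    save_skip_rewrite = vm->skip_pte_rewrite;
    vm->skip_pte_rewrite = true;            /* unbinds must not touch PTEs */

    list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
            /* ... unbind each VMA; on object-lock contention, restore
             * skip_pte_rewrite, drop the mutex and retry from the top ... */
    }

    if (!suspend_retains_ptes(vm))
            vm->clear_range(vm, 0, vm->total);      /* scrub everything */
    else
            i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);

    vm->skip_pte_rewrite = save_skip_rewrite;
    mutex_unlock(&vm->mutex);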
199 i915_ggtt_suspend_vm(&ggtt->vm); in i915_ggtt_suspend()
202 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_suspend()
207 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
217 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
228 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in guc_ggtt_invalidate()
229 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
257 static void gen8_ggtt_insert_page(struct i915_address_space *vm, in gen8_ggtt_insert_page() argument
263 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_page()
272 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, in gen8_ggtt_insert_entries() argument
278 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_entries()
299 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
308 static void gen6_ggtt_insert_page(struct i915_address_space *vm, in gen6_ggtt_insert_page() argument
314 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_page()
318 iowrite32(vm->pte_encode(addr, level, flags), pte); in gen6_ggtt_insert_page()
329 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, in gen6_ggtt_insert_entries() argument
334 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_entries()
345 iowrite32(vm->pte_encode(addr, level, flags), gte++); in gen6_ggtt_insert_entries()
350 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
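Both the gen8 and gen6 insert paths use the same streaming technique: treat the GSM as a flat array of PTEs, write one encoded entry per page (gen8_set_pte() or iowrite32()), then point the allocated-but-unused tail of the node at the scratch page, as at lines 299 and 350. A compilable userspace sketch of the pattern over a plain array; the encoder and sizes are mock values:

    #include <stdint.h>
    #include <stddef.h>

    #define NODE_SLOTS 6            /* PTE slots reserved for the node */
    static uint32_t gtt[NODE_SLOTS];

    static uint32_t mock_pte_encode(uint64_t addr)
    {
            return (uint32_t)addr | 1;      /* address bits + valid bit */
    }

    static void mock_insert_entries(const uint64_t *pages, size_t n_pages,
                                    uint32_t scratch)
    {
            uint32_t *gte = gtt;            /* first entry to fill */
            uint32_t *end = gtt + NODE_SLOTS;
            size_t i;

            for (i = 0; i < n_pages; i++)
                    *gte++ = mock_pte_encode(pages[i]);

            /* Fill the allocated but unused tail of the node with scratch
             * entries, mirroring the while-loops in the driver. */
            while (gte < end)
                    *gte++ = scratch;
    }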
359 static void nop_clear_range(struct i915_address_space *vm, in nop_clear_range() argument
364 static void gen8_ggtt_clear_range(struct i915_address_space *vm, in gen8_ggtt_clear_range() argument
367 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_clear_range()
370 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
385 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) in bxt_vtd_ggtt_wa() argument
394 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
398 struct i915_address_space *vm; member
408 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); in bxt_vtd_ggtt_insert_page__cb()
409 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
414 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_page__BKL() argument
420 struct insert_page arg = { vm, addr, offset, level }; in bxt_vtd_ggtt_insert_page__BKL()
426 struct i915_address_space *vm; member
436 gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags); in bxt_vtd_ggtt_insert_entries__cb()
437 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
442 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_entries__BKL() argument
447 struct insert_entries arg = { vm, vma_res, level, flags }; in bxt_vtd_ggtt_insert_entries__BKL()
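The __BKL variants work around corrupted GGTT updates on Broxton when VT-d is active: each update is marshalled into an argument struct and executed via stop_machine(), which runs the callback with every other CPU parked, serializing all GTT writes. A hedged sketch of the pattern (the real callbacks also issue the bxt_vtd_ggtt_wa() posting read shown above):

    /* Sketch of the stop_machine() marshalling pattern used above. */
    struct insert_page_args {
            struct i915_address_space *vm;
            dma_addr_t addr;
            u64 offset;
            enum i915_cache_level level;
    };

    static int insert_page_cb(void *data)   /* runs with all other CPUs parked */
    {
            struct insert_page_args *a = data;

            /* Call the raw helper (gen8_ggtt_insert_page() above), never the
             * serialized wrapper itself, to avoid recursion. */
            gen8_ggtt_insert_page(a->vm, a->addr, a->offset, a->level, 0);
            return 0;
    }

    static void insert_page_serialized(struct i915_address_space *vm,
                                       dma_addr_t addr, u64 offset,
                                       enum i915_cache_level level)
    {
            struct insert_page_args a = { vm, addr, offset, level };

            stop_machine(insert_page_cb, &a, NULL);
    }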
452 static void gen6_ggtt_clear_range(struct i915_address_space *vm, in gen6_ggtt_clear_range() argument
455 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_clear_range()
468 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
473 void intel_ggtt_bind_vma(struct i915_address_space *vm, in intel_ggtt_bind_vma() argument
493 vm->insert_entries(vm, vma_res, cache_level, pte_flags); in intel_ggtt_bind_vma()
497 void intel_ggtt_unbind_vma(struct i915_address_space *vm, in intel_ggtt_unbind_vma() argument
500 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in intel_ggtt_unbind_vma()
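This pair defines the GGTT's VMA contract: binding translates per-object attributes into PTE flags and hands the page stream to vm->insert_entries(), while unbinding simply points the range back at scratch via vm->clear_range(). A hedged reconstruction of the elided flag computation in intel_ggtt_bind_vma(), from my recollection of the driver rather than the lines above:

    /* Hedged: the elided body computes pte_flags roughly like this
     * before the insert_entries() call at line 493. */
    u32 pte_flags = 0;

    if (vma_res->bi.readonly)       /* honoured only where has_read_only */
            pte_flags |= PTE_READ_ONLY;
    if (vma_res->bi.lmem)           /* object lives in local memory */
            pte_flags |= PTE_LM;

    vm->insert_entries(vm, vma_res, cache_level, pte_flags);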
508 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
511 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); in ggtt_reserve_guc_top()
512 size = ggtt->vm.total - GUC_GGTT_TOP; in ggtt_reserve_guc_top()
514 ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size, in ggtt_reserve_guc_top()
518 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
563 intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); in init_ggtt()
591 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) in init_ggtt()
592 drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
600 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
615 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
616 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
619 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
624 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
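init_ggtt() finishes by scrubbing every unused stretch of the address space: drm_mm_for_each_hole() visits each gap between reserved nodes so no stale PTEs can dangle, and the final PAGE_SIZE at the very top is cleared as a reserved guard page that is never handed out. A condensed view of the walk, filling in the elided hole-length argument:

    /* Point every unallocated GGTT range at scratch. */
    struct drm_mm_node *entry;
    u64 hole_start, hole_end;

    drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end)
            ggtt->vm.clear_range(&ggtt->vm, hole_start,
                                 hole_end - hole_start);

    /* And the guard page at the very top of the GTT. */
    ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);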
633 static void aliasing_gtt_bind_vma(struct i915_address_space *vm, in aliasing_gtt_bind_vma() argument
647 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
651 vm->insert_entries(vm, vma_res, cache_level, pte_flags); in aliasing_gtt_bind_vma()
656 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, in aliasing_gtt_unbind_vma() argument
660 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in aliasing_gtt_unbind_vma()
663 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res); in aliasing_gtt_unbind_vma()
672 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0); in init_aliasing_ppgtt()
676 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
681 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
685 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); in init_aliasing_ppgtt()
686 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
687 i915_gem_object_unlock(ppgtt->vm.scratch[0]); in init_aliasing_ppgtt()
697 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
700 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
702 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma); in init_aliasing_ppgtt()
703 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
705 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma); in init_aliasing_ppgtt()
706 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
708 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
712 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
714 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
726 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
728 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in fini_aliasing_ppgtt()
729 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in fini_aliasing_ppgtt()
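The aliasing-PPGTT setup reduces to an ops-pointer swap: once the shadow PPGTT covers the full GGTT range, bind_vma/unbind_vma are redirected to the aliasing variants, which update both page tables, and fini restores the plain defaults; the GEM_BUG_ON checks at lines 702 and 705 assert the swap never stacks. Condensed:

    /* init: mirror every GGTT binding into the aliasing PPGTT too. */
    ggtt->vm.vma_ops.bind_vma   = aliasing_gtt_bind_vma;
    ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

    /* fini: back to GGTT-only binding. */
    ggtt->vm.vma_ops.bind_vma   = intel_ggtt_bind_vma;
    ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;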
753 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
754 i915_gem_drain_freed_objects(ggtt->vm.i915); in ggtt_cleanup_hw()
756 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
758 ggtt->vm.skip_pte_rewrite = true; in ggtt_cleanup_hw()
760 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in ggtt_cleanup_hw()
779 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
781 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
782 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
813 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); in i915_ggtt_driver_late_release()
814 dma_resv_fini(&ggtt->vm._resv); in i915_ggtt_driver_late_release()
868 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
893 kref_init(&ggtt->vm.resv_ref); in ggtt_probe_common()
894 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
903 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) in ggtt_probe_common()
906 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
907 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
913 static void gen6_gmch_remove(struct i915_address_space *vm) in gen6_gmch_remove() argument
915 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_gmch_remove()
918 free_scratch(vm); in gen6_gmch_remove()
929 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
948 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
949 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen8_gmch_probe()
950 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; in gen8_gmch_probe()
952 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
953 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
954 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
955 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
957 ggtt->vm.clear_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
959 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
966 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
967 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
975 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
976 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
978 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
984 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen8_gmch_probe()
985 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen8_gmch_probe()
987 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
989 setup_private_pat(ggtt->vm.gt->uncore); in gen8_gmch_probe()
1087 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
1112 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
1114 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1115 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen6_gmch_probe()
1117 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1119 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1120 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1121 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1122 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1127 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1129 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1131 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1133 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1135 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
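The elided conditionals around lines 1126-1134 pick the PTE encoder by platform. A hedged reconstruction from memory of the driver (the listing shows only the assignments): EDRAM parts take iris_pte_encode, Haswell hsw_pte_encode, Valleyview byt_pte_encode, other gen7 ivb_pte_encode, and everything older falls through to snb_pte_encode:

    /* Hedged reconstruction of the platform dispatch; not verbatim. */
    if (HAS_EDRAM(i915))
            ggtt->vm.pte_encode = iris_pte_encode;
    else if (IS_HASWELL(i915))
            ggtt->vm.pte_encode = hsw_pte_encode;
    else if (IS_VALLEYVIEW(i915))
            ggtt->vm.pte_encode = byt_pte_encode;
    else if (GRAPHICS_VER(i915) >= 7)
            ggtt->vm.pte_encode = ivb_pte_encode;
    else
            ggtt->vm.pte_encode = snb_pte_encode;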
1137 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen6_gmch_probe()
1138 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen6_gmch_probe()
1148 ggtt->vm.gt = gt; in ggtt_probe_hw()
1149 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1150 ggtt->vm.dma = i915->drm.dev; in ggtt_probe_hw()
1151 dma_resv_init(&ggtt->vm._resv); in ggtt_probe_hw()
1161 dma_resv_fini(&ggtt->vm._resv); in ggtt_probe_hw()
1165 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1169 ggtt->vm.total >> 20); in ggtt_probe_hw()
1170 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1172 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1175 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1179 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1180 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
1184 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
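The two sanity checks above are simple clamps: the GGTT is limited to what 32-bit offsets can address, and the CPU-mappable aperture can never exceed the GTT itself. A worked example of the arithmetic at lines 1165-1172: hardware reporting 8 GiB trips the test and is clamped to exactly 4 GiB:

    u64 total = 8ull << 30;         /* hardware claims 8 GiB */

    if ((total - 1) >> 32)          /* any address bit >= 32 in use? */
            total = 1ull << 32;     /* limit to 4 GiB */

    mappable_end = min_t(u64, mappable_end, total);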
1243 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
1244 * @vm: The VM to restore the mappings for
1252 bool i915_ggtt_resume_vm(struct i915_address_space *vm) in i915_ggtt_resume_vm() argument
1258 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_resume_vm()
1264 retained_ptes = suspend_retains_ptes(vm) && in i915_ggtt_resume_vm()
1265 !i915_vm_to_ggtt(vm)->pte_lost && in i915_ggtt_resume_vm()
1266 !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm)); in i915_ggtt_resume_vm()
1269 vm->clear_range(vm, 0, vm->total); in i915_ggtt_resume_vm()
1272 list_for_each_entry(vma, &vm->bound_list, vm_link) { in i915_ggtt_resume_vm()
1284 vma->ops->bind_vma(vm, NULL, vma->resource, in i915_ggtt_resume_vm()
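Resume is the mirror image of suspend: unless the retained-PTE probe passed (lines 1264-1266), the whole range is scrubbed and every VMA on vm->bound_list is rebound through its bind_vma() op, rewriting the PTEs from the still-valid vma->resource. A hedged skeleton of that loop, with the per-object domain bookkeeping summarized:

    /* Hedged skeleton of the rebind loop in i915_ggtt_resume_vm(). */
    if (!retained_ptes)
            vm->clear_range(vm, 0, vm->total);

    list_for_each_entry(vma, &vm->bound_list, vm_link) {
            struct drm_i915_gem_object *obj = vma->obj;
            unsigned int was_bound =
                    atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

            if (!retained_ptes)     /* rewrite PTEs from the live resource */
                    vma->ops->bind_vma(vm, NULL, vma->resource,
                                       obj ? obj->cache_level : 0,
                                       was_bound);
            /* ... mark obj dirty/GTT-domain as needed ... */
    }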
1301 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_resume()
1303 flush = i915_ggtt_resume_vm(&ggtt->vm); in i915_ggtt_resume()
1310 if (GRAPHICS_VER(ggtt->vm.i915) >= 8) in i915_ggtt_resume()
1311 setup_private_pat(ggtt->vm.gt->uncore); in i915_ggtt_resume()