Lines matching full:vm in drivers/gpu/drm/i915/gt/intel_ggtt.c
Each entry reads: <source line> <matched code> in <enclosing function>(); a trailing "argument" or "member" tag marks a line where vm is declared rather than used.
47 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
49 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
51 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
54 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
57 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
63 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
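For orientation, a reconstructed sketch of ggtt_init_hw() connecting the matches above. The matched lines are verbatim; the guard conditions and the io_mapping failure path around line 63 are recalled from the driver, so treat them as assumptions to check against the tree:

static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
        struct drm_i915_private *i915 = ggtt->vm.i915;

        i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

        ggtt->vm.is_ggtt = true;

        /* Only VLV supports read-only GGTT mappings */
        ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

        /* assumption: cache coloring is only needed without LLC and PPGTT */
        if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
                ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

        if (ggtt->mappable_end) {
                if (!io_mapping_init_wc(&ggtt->iomap, ggtt->gmadr.start,
                                        ggtt->mappable_end)) {
                        ggtt->vm.cleanup(&ggtt->vm);    /* line 63 */
                        return -EIO;
                }
        }

        return 0;
}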
124 mutex_lock(&ggtt->vm.mutex); in i915_ggtt_suspend()
127 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_suspend()
129 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_suspend()
142 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_suspend()
144 atomic_set(&ggtt->vm.open, open); in i915_ggtt_suspend()
146 mutex_unlock(&ggtt->vm.mutex); in i915_ggtt_suspend()
148 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_suspend()
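The suspend matches trace a single pattern: take the vm mutex, zero vm.open so unbind skips rewriting PTEs, evict whatever is not pinned, scrub the entire range with scratch, then restore the open count. A reconstructed sketch; the per-vma tests inside the loop are recalled from the driver, not shown in the matches:

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
        struct i915_vma *vma, *vn;
        int open;

        mutex_lock(&ggtt->vm.mutex);

        /* Skip rewriting PTE on VMA unbind. */
        open = atomic_xchg(&ggtt->vm.open, 0);

        list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
                i915_vma_wait_for_bind(vma);

                if (i915_vma_is_pinned(vma))
                        continue;

                /* assumption: only still-globally-bound nodes survive */
                if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
                        __i915_vma_evict(vma);
                        drm_mm_remove_node(&vma->node);
                }
        }

        ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
        atomic_set(&ggtt->vm.open, open);

        mutex_unlock(&ggtt->vm.mutex);

        intel_gt_check_and_clear_faults(ggtt->vm.gt);
}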
153 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
163 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
174 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in guc_ggtt_invalidate()
175 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
208 static void gen8_ggtt_insert_page(struct i915_address_space *vm, in gen8_ggtt_insert_page() argument
214 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_page()
223 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, in gen8_ggtt_insert_entries() argument
229 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_entries()
250 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
259 static void gen6_ggtt_insert_page(struct i915_address_space *vm, in gen6_ggtt_insert_page() argument
265 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_page()
269 iowrite32(vm->pte_encode(addr, level, flags), pte); in gen6_ggtt_insert_page()
280 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, in gen6_ggtt_insert_entries() argument
285 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_entries()
296 iowrite32(vm->pte_encode(addr, level, flags), gte++); in gen6_ggtt_insert_entries()
301 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
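All four insert paths share one shape: index into the mapped GTT at the vma's offset, write one encoded PTE per 4 KiB page, and pad the rest of the reserved node with the scratch PTE (that is what lines 250 and 301 are doing). A self-contained userspace model of that shape; every name below is illustrative, none of it is the driver's API:

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SHIFT 12               /* models I915_GTT_PAGE_SIZE = 4 KiB */

typedef uint64_t fake_pte_t;

static fake_pte_t pte_encode(uint64_t dma_addr)
{
        return dma_addr | 1;            /* assumption: bit 0 as a valid bit */
}

static void insert_entries(fake_pte_t *gtt, uint64_t node_start,
                           uint64_t node_size, const uint64_t *pages,
                           unsigned int npages, fake_pte_t scratch)
{
        fake_pte_t *gte = gtt + (node_start >> GTT_PAGE_SHIFT);
        fake_pte_t *end = gtt + ((node_start + node_size) >> GTT_PAGE_SHIFT);

        for (unsigned int i = 0; i < npages; i++)
                *gte++ = pte_encode(pages[i]);

        /* Fill the allocated-but-unused tail with scratch (cf. lines
         * 250/301) so stray accesses land on a safe page. */
        while (gte < end)
                *gte++ = scratch;
}

int main(void)
{
        fake_pte_t gtt[8] = { 0 };
        uint64_t pages[2] = { 0x100000, 0x200000 };

        insert_entries(gtt, 2ull << GTT_PAGE_SHIFT, 4ull << GTT_PAGE_SHIFT,
                       pages, 2, pte_encode(0xdead000));
        for (int i = 0; i < 8; i++)
                printf("pte[%d] = %#llx\n", i, (unsigned long long)gtt[i]);
        return 0;
}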
310 static void nop_clear_range(struct i915_address_space *vm, in nop_clear_range() argument
315 static void gen8_ggtt_clear_range(struct i915_address_space *vm, in gen8_ggtt_clear_range() argument
318 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_clear_range()
321 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
336 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) in bxt_vtd_ggtt_wa() argument
345 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
349 struct i915_address_space *vm; member
359 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); in bxt_vtd_ggtt_insert_page__cb()
360 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
365 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_page__BKL() argument
371 struct insert_page arg = { vm, addr, offset, level }; in bxt_vtd_ggtt_insert_page__BKL()
377 struct i915_address_space *vm; member
387 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); in bxt_vtd_ggtt_insert_entries__cb()
388 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
393 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_entries__BKL() argument
398 struct insert_entries arg = { vm, vma, level, flags }; in bxt_vtd_ggtt_insert_entries__BKL()
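Lines 336-398 are the Broxton VT-d workaround: the PTE write plus the GFX_FLSH_CNTL_GEN6 posting read in bxt_vtd_ggtt_wa() must not race with anything else touching the GGTT, so the arguments are bundled into a struct and the update runs as a stop_machine() callback (hence the __BKL suffix). Reconstructed from the matches; the struct's remaining fields and the stop_machine() call itself are inferred, not shown:

struct insert_page {
        struct i915_address_space *vm;
        dma_addr_t addr;
        u64 offset;
        enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
        struct insert_page *arg = _arg;

        gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
        bxt_vtd_ggtt_wa(arg->vm);

        return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
                                          dma_addr_t addr, u64 offset,
                                          enum i915_cache_level level,
                                          u32 unused)
{
        struct insert_page arg = { vm, addr, offset, level };

        /* serialise against every CPU while the PTE+flush pair executes */
        stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

The insert_entries variant at lines 377-398 follows the identical pattern with (vm, vma, level, flags).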
403 static void gen6_ggtt_clear_range(struct i915_address_space *vm, in gen6_ggtt_clear_range() argument
406 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_clear_range()
419 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
424 static void i915_ggtt_insert_page(struct i915_address_space *vm, in i915_ggtt_insert_page() argument
436 static void i915_ggtt_insert_entries(struct i915_address_space *vm, in i915_ggtt_insert_entries() argument
448 static void i915_ggtt_clear_range(struct i915_address_space *vm, in i915_ggtt_clear_range() argument
454 static void ggtt_bind_vma(struct i915_address_space *vm, in ggtt_bind_vma() argument
473 vm->insert_entries(vm, vma, cache_level, pte_flags); in ggtt_bind_vma()
477 static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) in ggtt_unbind_vma() argument
479 vm->clear_range(vm, vma->node.start, vma->size); in ggtt_unbind_vma()
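ggtt_bind_vma()/ggtt_unbind_vma() are the default vma_ops installed by every probe path below: derive pte_flags from the object (read-only PTEs only matter on VLV, per has_read_only at line 54) and defer to the per-generation insert_entries/clear_range hooks. A sketch of the bind side; the already-bound early-out and the flag derivation are recalled, not shown in the matches:

static void ggtt_bind_vma(struct i915_address_space *vm,
                          struct i915_vm_pt_stash *stash,
                          struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 flags)
{
        u32 pte_flags = 0;

        if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
                return;         /* nothing new was requested */

        if (vma->obj && i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;

        vm->insert_entries(vm, vma, cache_level, pte_flags);
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}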
487 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
490 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); in ggtt_reserve_guc_top()
491 size = ggtt->vm.total - GUC_GGTT_TOP; in ggtt_reserve_guc_top()
493 ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, in ggtt_reserve_guc_top()
497 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
540 intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); in init_ggtt()
568 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) in init_ggtt()
569 drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
577 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
592 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
593 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
596 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
601 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
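init_ggtt() partitions the space before anyone else can touch it: lines 487-497 reserve everything above GUC_GGTT_TOP (the GuC cannot address beyond that limit), lines 568-577 pin a one-page error-capture node in the mappable half, lines 592-596 scrub every allocator hole with scratch PTEs, and line 601 keeps the final page cleared as a guard against prefetch past the end. The GuC reservation, reconstructed; the arguments after size on line 493 are recalled from the driver:

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
        u64 size;
        int ret;

        if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
                return 0;

        GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
        size = ggtt->vm.total - GUC_GGTT_TOP;

        /* keep [GUC_GGTT_TOP, total) out of the allocator entirely,
         * since the GuC cannot address that range */
        ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
                                   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
                                   PIN_NOEVICT);
        if (ret)
                drm_dbg(&ggtt->vm.i915->drm,
                        "Failed to reserve top of GGTT for GuC\n");

        return ret;
}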
610 static void aliasing_gtt_bind_vma(struct i915_address_space *vm, in aliasing_gtt_bind_vma() argument
624 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
628 vm->insert_entries(vm, vma, cache_level, pte_flags); in aliasing_gtt_bind_vma()
631 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, in aliasing_gtt_unbind_vma() argument
635 vm->clear_range(vm, vma->node.start, vma->size); in aliasing_gtt_unbind_vma()
638 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma); in aliasing_gtt_unbind_vma()
647 ppgtt = i915_ppgtt_create(ggtt->vm.gt); in init_aliasing_ppgtt()
651 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
656 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
660 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); in init_aliasing_ppgtt()
661 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
662 i915_gem_object_unlock(ppgtt->vm.scratch[0]); in init_aliasing_ppgtt()
672 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
675 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
677 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); in init_aliasing_ppgtt()
678 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
680 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); in init_aliasing_ppgtt()
681 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
683 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
687 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
689 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
701 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
703 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in fini_aliasing_ppgtt()
704 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in fini_aliasing_ppgtt()
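The aliasing-ppgtt block is a redirection trick for platforms without real per-context address spaces: create a full-size ppgtt (line 647), preallocate page tables covering the entire GGTT range (lines 656-672), then swap the GGTT's vma_ops (lines 678 and 681) so each bind is mirrored into both address spaces, restoring the plain ops on teardown (lines 703-704). The bind hook, reconstructed; the flag tests are recalled from the driver:

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
                                  struct i915_vm_pt_stash *stash,
                                  struct i915_vma *vma,
                                  enum i915_cache_level cache_level,
                                  u32 flags)
{
        u32 pte_flags = 0;

        if (vma->obj && i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;

        if (flags & I915_VMA_LOCAL_BIND)        /* mirror into the alias */
                ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
                               stash, vma, cache_level, flags);

        if (flags & I915_VMA_GLOBAL_BIND)
                vm->insert_entries(vm, vma, cache_level, pte_flags);
}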
728 atomic_set(&ggtt->vm.open, 0); in ggtt_cleanup_hw()
731 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
733 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
735 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) in ggtt_cleanup_hw()
745 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
747 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
748 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
779 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); in i915_ggtt_driver_late_release()
780 dma_resv_fini(&ggtt->vm._resv); in i915_ggtt_driver_late_release()
819 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
844 kref_init(&ggtt->vm.resv_ref); in ggtt_probe_common()
845 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
854 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) in ggtt_probe_common()
857 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
858 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
879 static void gen6_gmch_remove(struct i915_address_space *vm) in gen6_gmch_remove() argument
881 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_gmch_remove()
884 free_scratch(vm); in gen6_gmch_remove()
895 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
912 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
914 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
915 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
916 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
917 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
919 ggtt->vm.clear_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
921 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
928 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
929 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
930 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
936 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen8_gmch_probe()
937 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen8_gmch_probe()
938 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen8_gmch_probe()
939 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen8_gmch_probe()
941 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
943 setup_private_pat(ggtt->vm.gt->uncore); in gen8_gmch_probe()
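Line 914's arithmetic deserves a worked example: size is the byte size of the GTT itself, each 8-byte gen8 PTE maps one 4 KiB page, so for instance an 8 MiB GTT yields a 4 GiB global address space. A self-contained check (the 8 MiB figure is illustrative, not a probed value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t size = 8ull << 20;             /* bytes of PTEs, example */
        uint64_t pte = sizeof(uint64_t);        /* gen8_pte_t is 64-bit */
        uint64_t page = 4096;                   /* I915_GTT_PAGE_SIZE */
        uint64_t total = (size / pte) * page;   /* the line-914 formula */

        printf("GGTT total = %llu GiB\n", (unsigned long long)(total >> 30));
        return 0;
}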
1041 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
1062 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
1064 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1066 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1068 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1069 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1070 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1071 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1076 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1078 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1080 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1082 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1084 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
1086 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen6_gmch_probe()
1087 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen6_gmch_probe()
1088 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen6_gmch_probe()
1089 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen6_gmch_probe()
1094 static void i915_gmch_remove(struct i915_address_space *vm) in i915_gmch_remove() argument
1101 struct drm_i915_private *i915 = ggtt->vm.i915; in i915_gmch_probe()
1111 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); in i915_gmch_probe()
1116 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in i915_gmch_probe()
1124 ggtt->vm.insert_page = i915_ggtt_insert_page; in i915_gmch_probe()
1125 ggtt->vm.insert_entries = i915_ggtt_insert_entries; in i915_gmch_probe()
1126 ggtt->vm.clear_range = i915_ggtt_clear_range; in i915_gmch_probe()
1127 ggtt->vm.cleanup = i915_gmch_remove; in i915_gmch_probe()
1131 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in i915_gmch_probe()
1132 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in i915_gmch_probe()
1133 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in i915_gmch_probe()
1134 ggtt->vm.vma_ops.clear_pages = clear_pages; in i915_gmch_probe()
1148 ggtt->vm.gt = gt; in ggtt_probe_hw()
1149 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1150 ggtt->vm.dma = i915->drm.dev; in ggtt_probe_hw()
1151 dma_resv_init(&ggtt->vm._resv); in ggtt_probe_hw()
1160 dma_resv_fini(&ggtt->vm._resv); in ggtt_probe_hw()
1164 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1168 ggtt->vm.total >> 20); in ggtt_probe_hw()
1169 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1171 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1174 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1178 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1179 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
1183 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
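Line 1164 is a compact "needs more than 32 bits?" test: (total - 1) >> 32 is nonzero exactly when total exceeds 1ULL << 32, while the -1 keeps a GGTT of exactly 4 GiB legal; on overflow the size is clamped to 4 GiB and mappable_end is re-clamped against it (lines 1169-1179). The same logic in miniature:

#include <stdint.h>
#include <assert.h>

static uint64_t clamp_ggtt(uint64_t total, uint64_t *mappable_end)
{
        if ((total - 1) >> 32)          /* line 1164: over 4 GiB? */
                total = 1ull << 32;     /* line 1169 */
        if (*mappable_end > total)      /* line 1174 */
                *mappable_end = total;  /* line 1179 */
        return total;
}

int main(void)
{
        uint64_t map = 1ull << 33;

        assert(clamp_ggtt(1ull << 32, &map) == 1ull << 32); /* 4 GiB passes */
        assert(map == 1ull << 32);              /* mappable was clamped */
        assert(clamp_ggtt((1ull << 32) + 4096, &map) == 1ull << 32);
        return 0;
}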
1247 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_resume()
1250 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_resume()
1253 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_resume()
1256 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_resume()
1262 vma->ops->bind_vma(&ggtt->vm, NULL, vma, in i915_ggtt_resume()
1271 atomic_set(&ggtt->vm.open, open); in i915_ggtt_resume()
1277 if (GRAPHICS_VER(ggtt->vm.i915) >= 8) in i915_ggtt_resume()
1278 setup_private_pat(ggtt->vm.gt->uncore); in i915_ggtt_resume()
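Resume inverts suspend: clear stale faults, scrub the whole GGTT, then replay each bound vma through its bind_vma hook with the flags it was bound with, restore vm.open, and on gen8+ reprogram the private PAT (lines 1277-1278). Reconstructed; the was_bound extraction and the write-domain flush are recalled from the driver, not shown in the matches:

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
        struct i915_vma *vma;
        bool flush = false;
        int open;

        intel_gt_check_and_clear_faults(ggtt->vm.gt);

        /* First fill our portion of the GTT with scratch pages */
        ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

        /* Skip rewriting PTE on VMA unbind. */
        open = atomic_xchg(&ggtt->vm.open, 0);

        /* rebind every object still bound into the GGTT */
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                unsigned int was_bound =
                        atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

                GEM_BUG_ON(!was_bound);
                vma->ops->bind_vma(&ggtt->vm, NULL, vma,
                                   obj ? obj->cache_level : 0, was_bound);
                if (obj) {      /* resume implies exclusive access */
                        flush |= fetch_and_zero(&obj->write_domain);
                        obj->read_domains |= I915_GEM_DOMAIN_GTT;
                }
        }

        atomic_set(&ggtt->vm.open, open);
        ggtt->invalidate(ggtt);

        if (flush)
                wbinvd_on_all_cpus();

        if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
                setup_private_pat(ggtt->vm.gt->uncore);
}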
1572 drm_err(&vma->vm->i915->drm, in i915_get_ggtt_vma_pages()