Lines matching +full:height +full:- +full:mm in drivers/gpu/drm/i915/gt/intel_ggtt.c
1 // SPDX-License-Identifier: MIT
41 if (node->color != color) in i915_ggtt_color_adjust()
42 *end -= I915_GTT_PAGE_SIZE; in i915_ggtt_color_adjust()
47 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
49 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
51 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
53 /* Only VLV supports read-only GGTT mappings */ in ggtt_init_hw()
54 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
57 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
59 if (ggtt->mappable_end) { in ggtt_init_hw()
60 if (!io_mapping_init_wc(&ggtt->iomap, in ggtt_init_hw()
61 ggtt->gmadr.start, in ggtt_init_hw()
62 ggtt->mappable_end)) { in ggtt_init_hw()
63 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
64 return -EIO; in ggtt_init_hw()
67 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, in ggtt_init_hw()
68 ggtt->mappable_end); in ggtt_init_hw()
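The read-only flag set above (only Valleyview honours read-only GGTT PTEs) is consumed later by the bind paths. A minimal sketch of that gating, using a hypothetical helper rather than code from this listing:

	/* Hypothetical helper: only request a read-only PTE where the GGTT supports it. */
	static u32 ggtt_ro_pte_flags(struct i915_vma *vma)
	{
		u32 pte_flags = 0;

		if (vma->vm->has_read_only &&
		    i915_gem_object_is_readonly(vma->obj))
			pte_flags |= PTE_READ_ONLY;

		return pte_flags;
	}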
77 * i915_ggtt_init_hw - Initialize GGTT hardware
90 ret = ggtt_init_hw(&i915->ggtt); in i915_ggtt_init_hw()
99 * unmapping anything from the GTT when VT-d is enabled.
124 mutex_lock(&ggtt->vm.mutex); in i915_ggtt_suspend()
127 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_suspend()
129 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_suspend()
130 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_ggtt_suspend()
138 drm_mm_remove_node(&vma->node); in i915_ggtt_suspend()
142 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_suspend()
143 ggtt->invalidate(ggtt); in i915_ggtt_suspend()
144 atomic_set(&ggtt->vm.open, open); in i915_ggtt_suspend()
146 mutex_unlock(&ggtt->vm.mutex); in i915_ggtt_suspend()
148 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_suspend()
153 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
155 spin_lock_irq(&uncore->lock); in gen6_ggtt_invalidate()
158 spin_unlock_irq(&uncore->lock); in gen6_ggtt_invalidate()
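The body elided between the lock and unlock above is a single flush-control write. A sketch, assuming the register is GFX_FLSH_CNTL_GEN6 as the bxt_vtd_ggtt_wa() posting read further down suggests:

	/* Sketch of a gen6-style GGTT invalidate; the register and bit are assumptions. */
	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	spin_unlock_irq(&uncore->lock);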
163 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
174 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in guc_ggtt_invalidate()
175 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
216 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_page()
220 ggtt->invalidate(ggtt); in gen8_ggtt_insert_page()
240 gte = (gen8_pte_t __iomem *)ggtt->gsm; in gen8_ggtt_insert_entries()
241 gte += vma->node.start / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
242 end = gte + vma->node.size / I915_GTT_PAGE_SIZE; in gen8_ggtt_insert_entries()
244 for_each_sgt_daddr(addr, iter, vma->pages) in gen8_ggtt_insert_entries()
250 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
256 ggtt->invalidate(ggtt); in gen8_ggtt_insert_entries()
267 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_page()
269 iowrite32(vm->pte_encode(addr, level, flags), pte); in gen6_ggtt_insert_page()
271 ggtt->invalidate(ggtt); in gen6_ggtt_insert_page()
278 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
291 gte = (gen6_pte_t __iomem *)ggtt->gsm; in gen6_ggtt_insert_entries()
292 gte += vma->node.start / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
293 end = gte + vma->node.size / I915_GTT_PAGE_SIZE; in gen6_ggtt_insert_entries()
295 for_each_sgt_daddr(addr, iter, vma->pages) in gen6_ggtt_insert_entries()
296 iowrite32(vm->pte_encode(addr, level, flags), gte++); in gen6_ggtt_insert_entries()
301 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
307 ggtt->invalidate(ggtt); in gen6_ggtt_insert_entries()
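Both insert paths above index the GSM identically: one PTE per I915_GTT_PAGE_SIZE (4 KiB) page, starting at node.start divided by the page size. A worked sketch with illustrative numbers:

	/*
	 * Illustrative arithmetic, not from the file: a vma whose drm_mm node
	 * starts at GGTT offset 0x10000 (64 KiB) begins at PTE index
	 * 0x10000 / I915_GTT_PAGE_SIZE = 16. gen6 PTEs are 32 bits wide
	 * (iowrite32), gen8 PTEs are 64 bits (gen8_set_pte), and any entries
	 * between the last real page and the end of the node are written with
	 * the scratch encoding.
	 */
	gen6_pte_t __iomem *first = (gen6_pte_t __iomem *)ggtt->gsm + 16;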
321 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
323 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; in gen8_ggtt_clear_range()
324 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen8_ggtt_clear_range()
345 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
359 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); in bxt_vtd_ggtt_insert_page__cb()
360 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
387 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); in bxt_vtd_ggtt_insert_entries__cb()
388 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
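These __cb helpers bundle the arguments so the real insert can run under stop_machine(), which is how the __BKL variants installed in gen8_gmch_probe() below serialize GGTT writes against aperture access. A sketch of the wrapper shape, assuming it mirrors the page-insert case:

	/* Sketch: run the PTE write with every other CPU stopped (BXT + VT-d workaround). */
	static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
						  dma_addr_t addr, u64 offset,
						  enum i915_cache_level level,
						  u32 unused)
	{
		struct insert_page arg = { vm, addr, offset, level };

		stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
	}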
410 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; in gen6_ggtt_clear_range()
411 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen6_ggtt_clear_range()
419 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
444 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, in i915_ggtt_insert_entries()
460 struct drm_i915_gem_object *obj = vma->obj; in ggtt_bind_vma()
473 vm->insert_entries(vm, vma, cache_level, pte_flags); in ggtt_bind_vma()
474 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; in ggtt_bind_vma()
479 vm->clear_range(vm, vma->node.start, vma->size); in ggtt_unbind_vma()
487 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
490 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); in ggtt_reserve_guc_top()
491 size = ggtt->vm.total - GUC_GGTT_TOP; in ggtt_reserve_guc_top()
493 ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, in ggtt_reserve_guc_top()
497 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
505 if (drm_mm_node_allocated(&ggtt->uc_fw)) in ggtt_release_guc_top()
506 drm_mm_remove_node(&ggtt->uc_fw); in ggtt_release_guc_top()
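The reservation above keeps normal GGTT allocations out of the range the GuC cannot address. Illustrative arithmetic, assuming GUC_GGTT_TOP is 0xFEE00000 as in contemporary i915 headers:

	/* Illustrative values only; the GUC_GGTT_TOP value is an assumption here. */
	u64 total = 1ULL << 32;          /* 4 GiB GGTT */
	u64 size  = total - 0xFEE00000;  /* 0x01200000 bytes = 18 MiB reserved at the top */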
512 if (drm_mm_node_allocated(&ggtt->error_capture)) in cleanup_init_ggtt()
513 drm_mm_remove_node(&ggtt->error_capture); in cleanup_init_ggtt()
514 mutex_destroy(&ggtt->error_mutex); in cleanup_init_ggtt()
535 * non-WOPCM memory. If GuC is not present or not in use we still need a in init_ggtt()
539 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, in init_ggtt()
540 intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); in init_ggtt()
546 mutex_init(&ggtt->error_mutex); in init_ggtt()
547 if (ggtt->mappable_end) { in init_ggtt()
560 * for an error-capture, remain silent. We can afford not in init_ggtt()
566 ggtt->error_capture.size = I915_GTT_PAGE_SIZE; in init_ggtt()
567 ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; in init_ggtt()
568 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) in init_ggtt()
569 drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
570 &ggtt->error_capture, in init_ggtt()
571 ggtt->error_capture.size, 0, in init_ggtt()
572 ggtt->error_capture.color, in init_ggtt()
573 0, ggtt->mappable_end, in init_ggtt()
576 if (drm_mm_node_allocated(&ggtt->error_capture)) in init_ggtt()
577 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
579 ggtt->error_capture.start, in init_ggtt()
580 ggtt->error_capture.start + ggtt->error_capture.size); in init_ggtt()
591 /* Clear any non-preallocated blocks */ in init_ggtt()
592 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
593 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
596 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
597 hole_end - hole_start); in init_ggtt()
601 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
620 if (i915_gem_object_is_readonly(vma->obj)) in aliasing_gtt_bind_vma()
624 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
628 vm->insert_entries(vm, vma, cache_level, pte_flags); in aliasing_gtt_bind_vma()
635 vm->clear_range(vm, vma->node.start, vma->size); in aliasing_gtt_unbind_vma()
638 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma); in aliasing_gtt_unbind_vma()
647 ppgtt = i915_ppgtt_create(ggtt->vm.gt); in init_aliasing_ppgtt()
651 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
652 err = -ENODEV; in init_aliasing_ppgtt()
656 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
660 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); in init_aliasing_ppgtt()
661 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
662 i915_gem_object_unlock(ppgtt->vm.scratch[0]); in init_aliasing_ppgtt()
667 * Note we only pre-allocate as far as the end of the global in init_aliasing_ppgtt()
668 * GTT. On 48b / 4-level page-tables, the difference is very, in init_aliasing_ppgtt()
672 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
674 ggtt->alias = ppgtt; in init_aliasing_ppgtt()
675 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
677 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); in init_aliasing_ppgtt()
678 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
680 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); in init_aliasing_ppgtt()
681 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
683 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
687 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
689 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
697 ppgtt = fetch_and_zero(&ggtt->alias); in fini_aliasing_ppgtt()
701 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
703 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in fini_aliasing_ppgtt()
704 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in fini_aliasing_ppgtt()
711 ret = init_ggtt(&i915->ggtt); in i915_init_ggtt()
716 ret = init_aliasing_ppgtt(&i915->ggtt); in i915_init_ggtt()
718 cleanup_init_ggtt(&i915->ggtt); in i915_init_ggtt()
728 atomic_set(&ggtt->vm.open, 0); in ggtt_cleanup_hw()
731 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
733 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
735 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) in ggtt_cleanup_hw()
738 if (drm_mm_node_allocated(&ggtt->error_capture)) in ggtt_cleanup_hw()
739 drm_mm_remove_node(&ggtt->error_capture); in ggtt_cleanup_hw()
740 mutex_destroy(&ggtt->error_mutex); in ggtt_cleanup_hw()
745 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
747 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
748 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
750 arch_phys_wc_del(ggtt->mtrr); in ggtt_cleanup_hw()
752 if (ggtt->iomap.size) in ggtt_cleanup_hw()
753 io_mapping_fini(&ggtt->iomap); in ggtt_cleanup_hw()
757 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
762 struct i915_ggtt *ggtt = &i915->ggtt; in i915_ggtt_driver_release()
771 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
777 struct i915_ggtt *ggtt = &i915->ggtt; in i915_ggtt_driver_late_release()
779 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); in i915_ggtt_driver_late_release()
780 dma_resv_fini(&ggtt->vm._resv); in i915_ggtt_driver_late_release()
819 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
820 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in ggtt_probe_common()
836 ggtt->gsm = ioremap(phys_addr, size); in ggtt_probe_common()
838 ggtt->gsm = ioremap_wc(phys_addr, size); in ggtt_probe_common()
839 if (!ggtt->gsm) { in ggtt_probe_common()
840 drm_err(&i915->drm, "Failed to map the ggtt page table\n"); in ggtt_probe_common()
841 return -ENOMEM; in ggtt_probe_common()
844 kref_init(&ggtt->vm.resv_ref); in ggtt_probe_common()
845 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
847 drm_err(&i915->drm, "Scratch setup failed\n"); in ggtt_probe_common()
849 iounmap(ggtt->gsm); in ggtt_probe_common()
854 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) in ggtt_probe_common()
857 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
858 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
868 GEM_BUG_ON(vma->pages); in ggtt_set_pages()
874 vma->page_sizes = vma->obj->mm.page_sizes; in ggtt_set_pages()
883 iounmap(ggtt->gsm); in gen6_gmch_remove()
895 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
896 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in gen8_gmch_probe()
902 ggtt->gmadr = pci_resource(pdev, 2); in gen8_gmch_probe()
903 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen8_gmch_probe()
912 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
914 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
915 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
916 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
917 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
919 ggtt->vm.clear_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
921 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
924 * Serialize GTT updates with aperture access on BXT if VT-d is on, in gen8_gmch_probe()
928 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
929 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
930 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
934 ggtt->invalidate = gen8_ggtt_invalidate; in gen8_gmch_probe()
936 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen8_gmch_probe()
937 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen8_gmch_probe()
938 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen8_gmch_probe()
939 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen8_gmch_probe()
941 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
943 setup_private_pat(ggtt->vm.gt->uncore); in gen8_gmch_probe()
1041 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
1042 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in gen6_gmch_probe()
1046 ggtt->gmadr = pci_resource(pdev, 2); in gen6_gmch_probe()
1047 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen6_gmch_probe()
1053 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { in gen6_gmch_probe()
1054 drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", in gen6_gmch_probe()
1055 &ggtt->mappable_end); in gen6_gmch_probe()
1056 return -ENXIO; in gen6_gmch_probe()
1062 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
1064 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1066 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1068 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1069 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1070 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1071 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1073 ggtt->invalidate = gen6_ggtt_invalidate; in gen6_gmch_probe()
1076 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1078 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1080 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1082 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1084 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
1086 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen6_gmch_probe()
1087 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen6_gmch_probe()
1088 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen6_gmch_probe()
1089 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen6_gmch_probe()
1101 struct drm_i915_private *i915 = ggtt->vm.i915; in i915_gmch_probe()
1105 ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL); in i915_gmch_probe()
1107 drm_err(&i915->drm, "failed to set up gmch\n"); in i915_gmch_probe()
1108 return -EIO; in i915_gmch_probe()
1111 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); in i915_gmch_probe()
1113 ggtt->gmadr = in i915_gmch_probe()
1114 (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); in i915_gmch_probe()
1116 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in i915_gmch_probe()
1119 drm_notice(&i915->drm, in i915_gmch_probe()
1121 ggtt->do_idle_maps = true; in i915_gmch_probe()
1124 ggtt->vm.insert_page = i915_ggtt_insert_page; in i915_gmch_probe()
1125 ggtt->vm.insert_entries = i915_ggtt_insert_entries; in i915_gmch_probe()
1126 ggtt->vm.clear_range = i915_ggtt_clear_range; in i915_gmch_probe()
1127 ggtt->vm.cleanup = i915_gmch_remove; in i915_gmch_probe()
1129 ggtt->invalidate = gmch_ggtt_invalidate; in i915_gmch_probe()
1131 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in i915_gmch_probe()
1132 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in i915_gmch_probe()
1133 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in i915_gmch_probe()
1134 ggtt->vm.vma_ops.clear_pages = clear_pages; in i915_gmch_probe()
1136 if (unlikely(ggtt->do_idle_maps)) in i915_gmch_probe()
1137 drm_notice(&i915->drm, in i915_gmch_probe()
1145 struct drm_i915_private *i915 = gt->i915; in ggtt_probe_hw()
1148 ggtt->vm.gt = gt; in ggtt_probe_hw()
1149 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1150 ggtt->vm.dma = i915->drm.dev; in ggtt_probe_hw()
1151 dma_resv_init(&ggtt->vm._resv); in ggtt_probe_hw()
1160 dma_resv_fini(&ggtt->vm._resv); in ggtt_probe_hw()
1164 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1165 drm_err(&i915->drm, in ggtt_probe_hw()
1168 ggtt->vm.total >> 20); in ggtt_probe_hw()
1169 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1170 ggtt->mappable_end = in ggtt_probe_hw()
1171 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1174 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1175 drm_err(&i915->drm, in ggtt_probe_hw()
1178 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1179 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
1183 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
1184 drm_dbg(&i915->drm, "GMADR size = %lluM\n", in ggtt_probe_hw()
1185 (u64)ggtt->mappable_end >> 20); in ggtt_probe_hw()
1186 drm_dbg(&i915->drm, "DSM size = %lluM\n", in ggtt_probe_hw()
1193 * i915_ggtt_probe_hw - Probe GGTT hardware location
1200 ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); in i915_ggtt_probe_hw()
1205 drm_info(&i915->drm, "VT-d active for gfx access\n"); in i915_ggtt_probe_hw()
1213 return -EIO; in i915_ggtt_enable_hw()
1220 GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); in i915_ggtt_enable_guc()
1222 ggtt->invalidate = guc_ggtt_invalidate; in i915_ggtt_enable_guc()
1224 ggtt->invalidate(ggtt); in i915_ggtt_enable_guc()
1230 if (ggtt->invalidate == gen8_ggtt_invalidate) in i915_ggtt_disable_guc()
1234 GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); in i915_ggtt_disable_guc()
1236 ggtt->invalidate = gen8_ggtt_invalidate; in i915_ggtt_disable_guc()
1238 ggtt->invalidate(ggtt); in i915_ggtt_disable_guc()
1247 intel_gt_check_and_clear_faults(ggtt->vm.gt); in i915_ggtt_resume()
1250 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_ggtt_resume()
1253 open = atomic_xchg(&ggtt->vm.open, 0); in i915_ggtt_resume()
1256 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { in i915_ggtt_resume()
1257 struct drm_i915_gem_object *obj = vma->obj; in i915_ggtt_resume()
1259 atomic_read(&vma->flags) & I915_VMA_BIND_MASK; in i915_ggtt_resume()
1262 vma->ops->bind_vma(&ggtt->vm, NULL, vma, in i915_ggtt_resume()
1263 obj ? obj->cache_level : 0, in i915_ggtt_resume()
1266 flush |= fetch_and_zero(&obj->write_domain); in i915_ggtt_resume()
1267 obj->read_domains |= I915_GEM_DOMAIN_GTT; in i915_ggtt_resume()
1271 atomic_set(&ggtt->vm.open, open); in i915_ggtt_resume()
1272 ggtt->invalidate(ggtt); in i915_ggtt_resume()
1277 if (GRAPHICS_VER(ggtt->vm.i915) >= 8) in i915_ggtt_resume()
1278 setup_private_pat(ggtt->vm.gt->uncore); in i915_ggtt_resume()
1285 unsigned int width, unsigned int height, in rotate_pages() argument
1295 src_idx = src_stride * (height - 1) + column + offset; in rotate_pages()
1296 for (row = 0; row < height; row++) { in rotate_pages()
1297 st->nents++; in rotate_pages()
1308 src_idx -= src_stride; in rotate_pages()
1311 left = (dst_stride - height) * I915_GTT_PAGE_SIZE; in rotate_pages()
1316 st->nents++; in rotate_pages()
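The index arithmetic above walks each destination column bottom-up through the source rows (start at src_stride * (height - 1) + column, then subtract src_stride per row). A worked sketch with illustrative sizes:

	/* Illustrative sizes, not from the file: offset = 0, width = 2, height = 3, src_stride = 2. */
	unsigned int column, row, src_idx;

	for (column = 0; column < 2; column++) {
		src_idx = 2 * (3 - 1) + column;   /* column 0 starts at page 4, column 1 at page 5 */
		for (row = 0; row < 3; row++) {
			/* visits source pages 4, 2, 0 then 5, 3, 1: column-major, bottom row first */
			src_idx -= 2;
		}
	}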
1337 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_rotate_pages()
1340 int ret = -ENOMEM; in intel_rotate_pages()
1352 st->nents = 0; in intel_rotate_pages()
1353 sg = st->sgl; in intel_rotate_pages()
1355 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) in intel_rotate_pages()
1356 sg = rotate_pages(obj, rot_info->plane[i].offset, in intel_rotate_pages()
1357 rot_info->plane[i].width, rot_info->plane[i].height, in intel_rotate_pages()
1358 rot_info->plane[i].src_stride, in intel_rotate_pages()
1359 rot_info->plane[i].dst_stride, in intel_rotate_pages()
1368 drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", in intel_rotate_pages()
1369 obj->base.size, rot_info->plane[0].width, in intel_rotate_pages()
1370 rot_info->plane[0].height, size); in intel_rotate_pages()
1377 unsigned int width, unsigned int height, in remap_pages() argument
1383 for (row = 0; row < height; row++) { in remap_pages()
1400 st->nents++; in remap_pages()
1408 left -= length; in remap_pages()
1411 offset += src_stride - width; in remap_pages()
1413 left = (dst_stride - width) * I915_GTT_PAGE_SIZE; in remap_pages()
1418 st->nents++; in remap_pages()
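remap_pages() differs from rotate_pages() in that it copies rows left to right and then pads each destination row out to dst_stride. Illustrative arithmetic for the two adjustments shown above:

	/*
	 * Illustrative sizes, not from the file: width = 3, src_stride = 5,
	 * dst_stride = 4. After the 3 real pages of a row, the source offset
	 * advances by (5 - 3) = 2 pages to reach the next source row, and
	 * (4 - 3) * I915_GTT_PAGE_SIZE = 4 KiB of padding entries are emitted
	 * before the next destination row.
	 */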
1439 struct drm_i915_private *i915 = to_i915(obj->base.dev); in intel_remap_pages()
1442 int ret = -ENOMEM; in intel_remap_pages()
1454 st->nents = 0; in intel_remap_pages()
1455 sg = st->sgl; in intel_remap_pages()
1457 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { in intel_remap_pages()
1458 sg = remap_pages(obj, rem_info->plane[i].offset, in intel_remap_pages()
1459 rem_info->plane[i].width, rem_info->plane[i].height, in intel_remap_pages()
1460 rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, in intel_remap_pages()
1472 drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n", in intel_remap_pages()
1473 obj->base.size, rem_info->plane[0].width, in intel_remap_pages()
1474 rem_info->plane[0].height, size); in intel_remap_pages()
1485 unsigned int count = view->partial.size; in intel_partial_pages()
1487 int ret = -ENOMEM; in intel_partial_pages()
1497 iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); in intel_partial_pages()
1500 sg = st->sgl; in intel_partial_pages()
1501 st->nents = 0; in intel_partial_pages()
1505 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), in intel_partial_pages()
1512 st->nents++; in intel_partial_pages()
1513 count -= len >> PAGE_SHIFT; in intel_partial_pages()
1538 * The vma->pages are only valid within the lifespan of the borrowed in i915_get_ggtt_vma_pages()
1539 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so in i915_get_ggtt_vma_pages()
1540 * must be the vma->pages. A simple rule is that vma->pages must only in i915_get_ggtt_vma_pages()
1541 * be accessed when the obj->mm.pages are pinned. in i915_get_ggtt_vma_pages()
1543 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); in i915_get_ggtt_vma_pages()
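The comment and assertion above pin down the lifetime rule: vma->pages borrows the sg_table from obj->mm.pages, so a walker must hold a pages pin for the duration. A minimal usage sketch with a hypothetical caller (standard i915 pin helpers assumed):

	/* Sketch: keep obj->mm.pages alive while vma->pages is consumed. */
	int err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	/* ... safe to walk vma->pages / write GGTT PTEs here ... */

	i915_gem_object_unpin_pages(obj);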
1545 switch (vma->ggtt_view.type) { in i915_get_ggtt_vma_pages()
1547 GEM_BUG_ON(vma->ggtt_view.type); in i915_get_ggtt_vma_pages()
1550 vma->pages = vma->obj->mm.pages; in i915_get_ggtt_vma_pages()
1554 vma->pages = in i915_get_ggtt_vma_pages()
1555 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); in i915_get_ggtt_vma_pages()
1559 vma->pages = in i915_get_ggtt_vma_pages()
1560 intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); in i915_get_ggtt_vma_pages()
1564 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); in i915_get_ggtt_vma_pages()
1569 if (IS_ERR(vma->pages)) { in i915_get_ggtt_vma_pages()
1570 ret = PTR_ERR(vma->pages); in i915_get_ggtt_vma_pages()
1571 vma->pages = NULL; in i915_get_ggtt_vma_pages()
1572 drm_err(&vma->vm->i915->drm, in i915_get_ggtt_vma_pages()
1574 vma->ggtt_view.type, ret); in i915_get_ggtt_vma_pages()