Lines Matching refs:ggtt
133 i915->ggtt.invalidate(i915); in i915_ggtt_invalidate()
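The first match shows that i915_ggtt_invalidate() does nothing but dispatch through a function pointer that the probe code stored on the GGTT, so each platform can supply its own TLB-flush routine. A minimal stand-alone sketch of that dispatch pattern is below; the fake_* structs and names are illustrative stand-ins, not the kernel's types.

#include <stdio.h>

struct fake_i915;

struct fake_ggtt {
	/* platform-specific TLB-invalidate hook, chosen at probe time */
	void (*invalidate)(struct fake_i915 *i915);
};

struct fake_i915 {
	struct fake_ggtt ggtt;	/* embedded, as in drm_i915_private */
};

static void gen6_style_invalidate(struct fake_i915 *i915)
{
	(void)i915;
	/* the real hook posts a register write; here we only log */
	printf("flush GGTT TLBs\n");
}

/* mirrors i915_ggtt_invalidate(): i915->ggtt.invalidate(i915) */
static void fake_ggtt_invalidate(struct fake_i915 *i915)
{
	i915->ggtt.invalidate(i915);
}

int main(void)
{
	struct fake_i915 i915 = { .ggtt.invalidate = gen6_style_invalidate };

	fake_ggtt_invalidate(&i915);
	return 0;
}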
2049 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); in pd_vma_bind() local
2056 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; in pd_vma_bind()
2099 struct i915_ggtt *ggtt = &i915->ggtt; in pd_vma_create() local
2103 GEM_BUG_ON(size > ggtt->vm.total); in pd_vma_create()
2111 vma->vm = &ggtt->vm; in pd_vma_create()
2164 struct i915_ggtt * const ggtt = &i915->ggtt; in gen6_ppgtt_create() local
2192 ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; in gen6_ppgtt_create()
2422 struct i915_ggtt *ggtt = &dev_priv->ggtt; in i915_gem_suspend_gtt_mappings() local
2432 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_gem_suspend_gtt_mappings()
2474 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_page() local
2476 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); in gen8_ggtt_insert_page()
2480 ggtt->invalidate(vm->i915); in gen8_ggtt_insert_page()
2488 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_entries() local
2499 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; in gen8_ggtt_insert_entries()
2508 ggtt->invalidate(vm->i915); in gen8_ggtt_insert_entries()
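The gen8_ggtt_insert_page()/gen8_ggtt_insert_entries() matches show the common write path: index the GSM mapping by offset >> PAGE_SHIFT, store the encoded PTE, then call ggtt->invalidate(). A hedged user-space sketch of that indexing and flush ordering follows; it uses a plain array instead of the __iomem GSM mapping and a made-up encode rule, so treat every fake_* name as an assumption.

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SHIFT 12
#define FAKE_NUM_PTES   16

typedef uint64_t fake_pte_t;

static fake_pte_t gsm[FAKE_NUM_PTES];	/* stand-in for ggtt->gsm */

/* illustrative encode: page address plus a "present" bit */
static fake_pte_t fake_pte_encode(uint64_t addr)
{
	return addr | 1;
}

static void fake_invalidate(void)
{
	printf("flush GGTT TLBs after PTE update\n");
}

/* mirrors gen8_ggtt_insert_page(): index GSM by offset >> PAGE_SHIFT, then flush */
static void fake_insert_page(uint64_t dma_addr, uint64_t ggtt_offset)
{
	gsm[ggtt_offset >> FAKE_PAGE_SHIFT] = fake_pte_encode(dma_addr);
	fake_invalidate();
}

int main(void)
{
	fake_insert_page(0x100000, 2 << FAKE_PAGE_SHIFT);
	printf("pte[2] = %#llx\n", (unsigned long long)gsm[2]);
	return 0;
}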
2517 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_page() local
2519 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); in gen6_ggtt_insert_page()
2523 ggtt->invalidate(vm->i915); in gen6_ggtt_insert_page()
2537 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_entries() local
2538 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; in gen6_ggtt_insert_entries()
2549 ggtt->invalidate(vm->i915); in gen6_ggtt_insert_entries()
2560 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_clear_range() local
2566 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; in gen8_ggtt_clear_range()
2567 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen8_ggtt_clear_range()
2676 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_clear_range() local
2680 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; in gen6_ggtt_clear_range()
2681 const int max_entries = ggtt_total_entries(ggtt) - first_entry; in gen6_ggtt_clear_range()
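Both gen8_ggtt_clear_range() and gen6_ggtt_clear_range() begin the same way: convert the byte range into first_entry plus an entry count, clamp the count against ggtt_total_entries(), and overwrite the affected PTEs with the scratch-page entry. A small sketch of that clamping arithmetic, with assumed fake_* names and an array standing in for the GSM:

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SHIFT  12
#define FAKE_NUM_PTES    16
#define FAKE_SCRATCH_PTE 0x1000ull	/* every cleared slot points at one scratch page */

static uint64_t gsm[FAKE_NUM_PTES];

/* mirrors the gen6/gen8 clear_range shape: index, clamp, scrub */
static void fake_clear_range(uint64_t start, uint64_t length)
{
	unsigned int first_entry = start >> FAKE_PAGE_SHIFT;
	unsigned int num_entries = length >> FAKE_PAGE_SHIFT;
	unsigned int max_entries = FAKE_NUM_PTES - first_entry;
	unsigned int i;

	if (num_entries > max_entries)
		num_entries = max_entries;	/* WARN_ON() in the driver */

	for (i = 0; i < num_entries; i++)
		gsm[first_entry + i] = FAKE_SCRATCH_PTE;
}

int main(void)
{
	fake_clear_range(2 << FAKE_PAGE_SHIFT, 4 << FAKE_PAGE_SHIFT);
	printf("pte[3] = %#llx\n", (unsigned long long)gsm[3]);
	return 0;
}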
2823 struct i915_ggtt *ggtt = &dev_priv->ggtt; in i915_gem_gtt_finish_pages() local
2825 if (unlikely(ggtt->do_idle_maps)) { in i915_gem_gtt_finish_pages()
2871 struct i915_ggtt *ggtt = &i915->ggtt; in i915_gem_init_aliasing_ppgtt() local
2879 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in i915_gem_init_aliasing_ppgtt()
2890 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); in i915_gem_init_aliasing_ppgtt()
2896 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); in i915_gem_init_aliasing_ppgtt()
2897 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in i915_gem_init_aliasing_ppgtt()
2899 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); in i915_gem_init_aliasing_ppgtt()
2900 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in i915_gem_init_aliasing_ppgtt()
2911 struct i915_ggtt *ggtt = &i915->ggtt; in i915_gem_fini_aliasing_ppgtt() local
2920 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in i915_gem_fini_aliasing_ppgtt()
2921 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in i915_gem_fini_aliasing_ppgtt()
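The aliasing-PPGTT matches show init asserting that the GGTT still carries the default ggtt_bind_vma/ggtt_unbind_vma hooks, then swapping in the aliasing variants, and fini restoring the defaults. A minimal sketch of that reversible hook swap, with assert() standing in for GEM_BUG_ON and purely illustrative function names:

#include <assert.h>
#include <stdio.h>

struct fake_vma_ops {
	void (*bind_vma)(void);
	void (*unbind_vma)(void);
};

static void ggtt_bind(void)       { printf("bind into GGTT only\n"); }
static void ggtt_unbind(void)     { printf("unbind from GGTT\n"); }
static void aliasing_bind(void)   { printf("bind into GGTT and aliasing PPGTT\n"); }
static void aliasing_unbind(void) { printf("unbind from both\n"); }

static struct fake_vma_ops vma_ops = { ggtt_bind, ggtt_unbind };

static void init_aliasing_ppgtt(void)
{
	assert(vma_ops.bind_vma == ggtt_bind);		/* GEM_BUG_ON in the driver */
	vma_ops.bind_vma = aliasing_bind;

	assert(vma_ops.unbind_vma == ggtt_unbind);
	vma_ops.unbind_vma = aliasing_unbind;
}

static void fini_aliasing_ppgtt(void)
{
	vma_ops.bind_vma = ggtt_bind;
	vma_ops.unbind_vma = ggtt_unbind;
}

int main(void)
{
	init_aliasing_ppgtt();
	vma_ops.bind_vma();
	fini_aliasing_ppgtt();
	vma_ops.bind_vma();
	return 0;
}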
2935 struct i915_ggtt *ggtt = &dev_priv->ggtt; in i915_gem_init_ggtt() local
2945 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, in i915_gem_init_ggtt()
2947 0, ggtt->mappable_end, in i915_gem_init_ggtt()
2953 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in i915_gem_init_ggtt()
2956 ggtt->vm.clear_range(&ggtt->vm, hole_start, in i915_gem_init_ggtt()
2961 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in i915_gem_init_ggtt()
2972 drm_mm_remove_node(&ggtt->error_capture); in i915_gem_init_ggtt()
2982 struct i915_ggtt *ggtt = &dev_priv->ggtt; in i915_ggtt_cleanup_hw() local
2986 ggtt->vm.closed = true; in i915_ggtt_cleanup_hw()
2991 GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); in i915_ggtt_cleanup_hw()
2992 list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) in i915_ggtt_cleanup_hw()
2995 if (drm_mm_node_allocated(&ggtt->error_capture)) in i915_ggtt_cleanup_hw()
2996 drm_mm_remove_node(&ggtt->error_capture); in i915_ggtt_cleanup_hw()
2998 if (drm_mm_initialized(&ggtt->vm.mm)) { in i915_ggtt_cleanup_hw()
3000 i915_address_space_fini(&ggtt->vm); in i915_ggtt_cleanup_hw()
3003 ggtt->vm.cleanup(&ggtt->vm); in i915_ggtt_cleanup_hw()
3013 arch_phys_wc_del(ggtt->mtrr); in i915_ggtt_cleanup_hw()
3014 io_mapping_fini(&ggtt->iomap); in i915_ggtt_cleanup_hw()
3053 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) in ggtt_probe_common() argument
3055 struct drm_i915_private *dev_priv = ggtt->vm.i915; in ggtt_probe_common()
3071 ggtt->gsm = ioremap_nocache(phys_addr, size); in ggtt_probe_common()
3073 ggtt->gsm = ioremap_wc(phys_addr, size); in ggtt_probe_common()
3074 if (!ggtt->gsm) { in ggtt_probe_common()
3079 ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); in ggtt_probe_common()
3083 iounmap(ggtt->gsm); in ggtt_probe_common()
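ggtt_probe_common() maps the GGTT page-table area (gsm) and then sets up the scratch page, unmapping the gsm again if the scratch setup fails. The sketch below mimics that acquire/acquire/unwind-on-error ordering with malloc()/free() standing in for ioremap()/iounmap(); all fake_* helpers are assumptions.

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for ioremap_wc()/iounmap() and setup_scratch_page() */
static void *fake_ioremap(size_t size)     { return malloc(size); }
static void  fake_iounmap(void *addr)      { free(addr); }
static int   fake_setup_scratch_page(void) { return 0; /* or an -errno on failure */ }

static void *gsm;

/* mirrors ggtt_probe_common(): map gsm, set up scratch, unwind on error */
static int fake_ggtt_probe_common(size_t size)
{
	int ret;

	gsm = fake_ioremap(size);
	if (!gsm) {
		fprintf(stderr, "failed to map the ggtt page table\n");
		return -1;
	}

	ret = fake_setup_scratch_page();
	if (ret) {
		fprintf(stderr, "scratch setup failed, unmapping gsm\n");
		fake_iounmap(gsm);
	}

	return ret;
}

int main(void)
{
	return fake_ggtt_probe_common(4096) ? 1 : 0;
}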
3352 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_gmch_remove() local
3354 iounmap(ggtt->gsm); in gen6_gmch_remove()
3383 static int gen8_gmch_probe(struct i915_ggtt *ggtt) in gen8_gmch_probe() argument
3385 struct drm_i915_private *dev_priv = ggtt->vm.i915; in gen8_gmch_probe()
3392 ggtt->gmadr = in gen8_gmch_probe()
3395 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen8_gmch_probe()
3409 ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; in gen8_gmch_probe()
3410 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
3411 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
3412 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
3414 ggtt->vm.clear_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
3416 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
3420 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
3421 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
3422 if (ggtt->vm.clear_range != nop_clear_range) in gen8_gmch_probe()
3423 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; in gen8_gmch_probe()
3426 ggtt->invalidate = gen6_ggtt_invalidate; in gen8_gmch_probe()
3428 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen8_gmch_probe()
3429 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen8_gmch_probe()
3430 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen8_gmch_probe()
3431 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen8_gmch_probe()
3435 return ggtt_probe_common(ggtt, size); in gen8_gmch_probe()
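gen8_gmch_probe() sizes the GGTT and then populates the vm callbacks, replacing the insert/clear hooks with serialised variants when the BXT VT-d workaround applies, and only replacing clear_range if it was not already the nop. The sketch below reproduces that conditional vtable wiring with illustrative names and plain booleans in place of the driver's platform checks.

#include <stdbool.h>
#include <stdio.h>

struct fake_vm_ops {
	void (*insert_page)(void);
	void (*insert_entries)(void);
	void (*clear_range)(void);
};

static void gen8_insert_page(void)    { puts("gen8 insert_page"); }
static void gen8_insert_entries(void) { puts("gen8 insert_entries"); }
static void gen8_clear_range(void)    { puts("gen8 clear_range"); }
static void nop_clear_range(void)     { }
static void vtd_insert_page(void)     { puts("serialised insert_page"); }
static void vtd_insert_entries(void)  { puts("serialised insert_entries"); }
static void vtd_clear_range(void)     { puts("serialised clear_range"); }

/* mirrors the wiring order visible in the gen8_gmch_probe() matches */
static void fake_gen8_probe(struct fake_vm_ops *ops,
			    bool full_ppgtt, bool vtd_workaround)
{
	ops->insert_page = gen8_insert_page;
	ops->insert_entries = gen8_insert_entries;
	ops->clear_range = full_ppgtt ? nop_clear_range : gen8_clear_range;

	if (vtd_workaround) {
		ops->insert_page = vtd_insert_page;
		ops->insert_entries = vtd_insert_entries;
		if (ops->clear_range != nop_clear_range)
			ops->clear_range = vtd_clear_range;
	}
}

int main(void)
{
	struct fake_vm_ops ops;

	fake_gen8_probe(&ops, false, true);
	ops.insert_page();
	return 0;
}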
3438 static int gen6_gmch_probe(struct i915_ggtt *ggtt) in gen6_gmch_probe() argument
3440 struct drm_i915_private *dev_priv = ggtt->vm.i915; in gen6_gmch_probe()
3446 ggtt->gmadr = in gen6_gmch_probe()
3449 ggtt->mappable_end = resource_size(&ggtt->gmadr); in gen6_gmch_probe()
3454 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { in gen6_gmch_probe()
3455 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); in gen6_gmch_probe()
3467 ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT; in gen6_gmch_probe()
3469 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
3470 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
3471 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
3472 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
3474 ggtt->invalidate = gen6_ggtt_invalidate; in gen6_gmch_probe()
3477 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
3479 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
3481 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
3483 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
3485 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
3487 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in gen6_gmch_probe()
3488 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in gen6_gmch_probe()
3489 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in gen6_gmch_probe()
3490 ggtt->vm.vma_ops.clear_pages = clear_pages; in gen6_gmch_probe()
3492 return ggtt_probe_common(ggtt, size); in gen6_gmch_probe()
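gen6_gmch_probe() selects one of several PTE encoders (Iris, Haswell, Baytrail, Ivybridge, Sandy Bridge) and stores it in vm.pte_encode for the insert paths to call later. The sketch below shows that select-once/call-many pattern; the bit packing in the encoders is invented for illustration and does not match the hardware layout.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t fake_gen6_pte_t;

/* illustrative packing: page-aligned address, bit 0 = valid */
static fake_gen6_pte_t snb_style_pte_encode(uint64_t addr)
{
	return (fake_gen6_pte_t)(addr & ~0xfffull) | 1;
}

static fake_gen6_pte_t byt_style_pte_encode(uint64_t addr)
{
	/* the real Baytrail encoder differs only in its cache-control bits */
	return (fake_gen6_pte_t)(addr & ~0xfffull) | 3;
}

enum fake_platform { FAKE_SNB, FAKE_BYT };

/* one encoder pointer, chosen once at probe, used by every insert */
static fake_gen6_pte_t (*pte_encode)(uint64_t addr);

static void fake_gen6_probe(enum fake_platform p)
{
	pte_encode = (p == FAKE_BYT) ? byt_style_pte_encode
				     : snb_style_pte_encode;
}

int main(void)
{
	fake_gen6_probe(FAKE_BYT);
	printf("pte = %#x\n", (unsigned)pte_encode(0x123000));
	return 0;
}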
3500 static int i915_gmch_probe(struct i915_ggtt *ggtt) in i915_gmch_probe() argument
3502 struct drm_i915_private *dev_priv = ggtt->vm.i915; in i915_gmch_probe()
3512 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); in i915_gmch_probe()
3514 ggtt->gmadr = in i915_gmch_probe()
3516 ggtt->mappable_end); in i915_gmch_probe()
3518 ggtt->do_idle_maps = needs_idle_maps(dev_priv); in i915_gmch_probe()
3519 ggtt->vm.insert_page = i915_ggtt_insert_page; in i915_gmch_probe()
3520 ggtt->vm.insert_entries = i915_ggtt_insert_entries; in i915_gmch_probe()
3521 ggtt->vm.clear_range = i915_ggtt_clear_range; in i915_gmch_probe()
3522 ggtt->vm.cleanup = i915_gmch_remove; in i915_gmch_probe()
3524 ggtt->invalidate = gmch_ggtt_invalidate; in i915_gmch_probe()
3526 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; in i915_gmch_probe()
3527 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; in i915_gmch_probe()
3528 ggtt->vm.vma_ops.set_pages = ggtt_set_pages; in i915_gmch_probe()
3529 ggtt->vm.vma_ops.clear_pages = clear_pages; in i915_gmch_probe()
3531 if (unlikely(ggtt->do_idle_maps)) in i915_gmch_probe()
3543 struct i915_ggtt *ggtt = &dev_priv->ggtt; in i915_ggtt_probe_hw() local
3546 ggtt->vm.i915 = dev_priv; in i915_ggtt_probe_hw()
3547 ggtt->vm.dma = &dev_priv->drm.pdev->dev; in i915_ggtt_probe_hw()
3550 ret = i915_gmch_probe(ggtt); in i915_ggtt_probe_hw()
3552 ret = gen6_gmch_probe(ggtt); in i915_ggtt_probe_hw()
3554 ret = gen8_gmch_probe(ggtt); in i915_ggtt_probe_hw()
3564 ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP); in i915_ggtt_probe_hw()
3565 ggtt->mappable_end = in i915_ggtt_probe_hw()
3566 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in i915_ggtt_probe_hw()
3569 if ((ggtt->vm.total - 1) >> 32) { in i915_ggtt_probe_hw()
3572 ggtt->vm.total >> 20); in i915_ggtt_probe_hw()
3573 ggtt->vm.total = 1ULL << 32; in i915_ggtt_probe_hw()
3574 ggtt->mappable_end = in i915_ggtt_probe_hw()
3575 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in i915_ggtt_probe_hw()
3578 if (ggtt->mappable_end > ggtt->vm.total) { in i915_ggtt_probe_hw()
3581 &ggtt->mappable_end, ggtt->vm.total); in i915_ggtt_probe_hw()
3582 ggtt->mappable_end = ggtt->vm.total; in i915_ggtt_probe_hw()
3586 DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); in i915_ggtt_probe_hw()
3587 DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); in i915_ggtt_probe_hw()
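After probing, i915_ggtt_probe_hw() sanity-clamps the results: the usable GGTT is capped at GUC_GGTT_TOP when the GuC is in use, a GGTT reaching past 32 bits of address space is trimmed to 4 GiB, and the mappable aperture is never allowed to exceed the total. A sketch of that clamping arithmetic, with an illustrative cap value and assumed fake_* names:

#include <stdint.h>
#include <stdio.h>

#define FAKE_GUC_GGTT_TOP 0xFEE00000ull	/* stand-in for GUC_GGTT_TOP */

struct fake_ggtt {
	uint64_t total;		/* size of the GGTT address space */
	uint64_t mappable_end;	/* size of the CPU-visible aperture */
};

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* mirrors the clamping order in i915_ggtt_probe_hw() */
static void fake_clamp(struct fake_ggtt *ggtt, int using_guc)
{
	if (using_guc) {
		ggtt->total = min_u64(ggtt->total, FAKE_GUC_GGTT_TOP);
		ggtt->mappable_end = min_u64(ggtt->mappable_end, ggtt->total);
	}

	if ((ggtt->total - 1) >> 32) {		/* a GGTT above 4 GiB is unexpected */
		ggtt->total = 1ull << 32;
		ggtt->mappable_end = min_u64(ggtt->mappable_end, ggtt->total);
	}

	if (ggtt->mappable_end > ggtt->total)	/* aperture can't exceed the GGTT */
		ggtt->mappable_end = ggtt->total;
}

int main(void)
{
	struct fake_ggtt ggtt = { .total = 8ull << 30, .mappable_end = 512ull << 20 };

	fake_clamp(&ggtt, 1);
	printf("total=%lluM mappable=%lluM\n",
	       (unsigned long long)(ggtt.total >> 20),
	       (unsigned long long)(ggtt.mappable_end >> 20));
	return 0;
}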
3602 struct i915_ggtt *ggtt = &dev_priv->ggtt; in i915_ggtt_init_hw() local
3613 i915_address_space_init(&ggtt->vm, dev_priv); in i915_ggtt_init_hw()
3616 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); in i915_ggtt_init_hw()
3619 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; in i915_ggtt_init_hw()
3622 if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, in i915_ggtt_init_hw()
3623 dev_priv->ggtt.gmadr.start, in i915_ggtt_init_hw()
3624 dev_priv->ggtt.mappable_end)) { in i915_ggtt_init_hw()
3629 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); in i915_ggtt_init_hw()
3642 ggtt->vm.cleanup(&ggtt->vm); in i915_ggtt_init_hw()
3656 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate); in i915_ggtt_enable_guc()
3658 i915->ggtt.invalidate = guc_ggtt_invalidate; in i915_ggtt_enable_guc()
3666 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); in i915_ggtt_disable_guc()
3668 i915->ggtt.invalidate = gen6_ggtt_invalidate; in i915_ggtt_disable_guc()
3675 struct i915_ggtt *ggtt = &dev_priv->ggtt; in i915_gem_restore_gtt_mappings() local
3681 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); in i915_gem_restore_gtt_mappings()
3683 ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */ in i915_gem_restore_gtt_mappings()
3686 GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); in i915_gem_restore_gtt_mappings()
3687 list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) { in i915_gem_restore_gtt_mappings()
3703 ggtt->vm.closed = false; in i915_gem_restore_gtt_mappings()
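Finally, i915_gem_restore_gtt_mappings() scrubs the whole GGTT after resume, temporarily marks the vm closed so that any VMA unbind in that window skips rewriting PTEs that were just cleared, rebinds the inactive VMAs, and reopens the vm. A compact sketch of that closed-flag idiom; everything here is an illustrative stand-in for the real vma machinery.

#include <stdbool.h>
#include <stdio.h>

static bool vm_closed;

static void clear_whole_ggtt(void)
{
	printf("scrub all GGTT PTEs to scratch\n");
}

static void unbind_vma(void)
{
	/* mirrors the "skip rewriting PTE on VMA unbind" comment above */
	if (!vm_closed)
		printf("rewrite PTEs for unbind\n");
}

static void bind_vma(void)
{
	printf("rewrite PTEs for bind\n");
}

/* mirrors the restore flow: clear, close, rebind, reopen */
static void fake_restore_gtt_mappings(int num_vmas)
{
	int i;

	clear_whole_ggtt();
	vm_closed = true;

	for (i = 0; i < num_vmas; i++) {
		unbind_vma();	/* cheap while closed: PTEs were just scrubbed */
		bind_vma();
	}

	vm_closed = false;
}

int main(void)
{
	fake_restore_gtt_mappings(2);
	return 0;
}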