/Linux-v4.19/drivers/gpu/drm/i915/

D | i915_vma.c |
    37  static void vma_print_allocator(struct i915_vma *vma, const char *reason)  in vma_print_allocator() argument
    46  if (!vma->node.stack) {  in vma_print_allocator()
    48  vma->node.start, vma->node.size, reason);  in vma_print_allocator()
    52  depot_fetch_stack(vma->node.stack, &trace);  in vma_print_allocator()
    55  vma->node.start, vma->node.size, reason, buf);  in vma_print_allocator()
    60  static void vma_print_allocator(struct i915_vma *vma, const char *reason)  in vma_print_allocator() argument
    68  struct i915_vma *vma;  member
    74  __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)  in __i915_vma_retire() argument
    76  struct drm_i915_gem_object *obj = vma->obj;  in __i915_vma_retire()
    78  GEM_BUG_ON(!i915_vma_is_active(vma));  in __i915_vma_retire()
    [all …]

D | i915_vma.h |
    143  static inline bool i915_vma_is_active(struct i915_vma *vma)  in i915_vma_is_active() argument
    145  return vma->active_count;  in i915_vma_is_active()
    148  int __must_check i915_vma_move_to_active(struct i915_vma *vma,
    152  static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)  in i915_vma_is_ggtt() argument
    154  return vma->flags & I915_VMA_GGTT;  in i915_vma_is_ggtt()
    157  static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)  in i915_vma_has_ggtt_write() argument
    159  return vma->flags & I915_VMA_GGTT_WRITE;  in i915_vma_has_ggtt_write()
    162  static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)  in i915_vma_set_ggtt_write() argument
    164  GEM_BUG_ON(!i915_vma_is_ggtt(vma));  in i915_vma_set_ggtt_write()
    165  vma->flags |= I915_VMA_GGTT_WRITE;  in i915_vma_set_ggtt_write()
    [all …]

D | i915_gem_fence_reg.c |
    61  struct i915_vma *vma)  in i965_write_fence_reg() argument
    79  if (vma) {  in i965_write_fence_reg()
    80  unsigned int stride = i915_gem_object_get_stride(vma->obj);  in i965_write_fence_reg()
    82  GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));  in i965_write_fence_reg()
    83  GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));  in i965_write_fence_reg()
    84  GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));  in i965_write_fence_reg()
    87  val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;  in i965_write_fence_reg()
    88  val |= vma->node.start;  in i965_write_fence_reg()
    90  if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)  in i965_write_fence_reg()
    117  struct i915_vma *vma)  in i915_write_fence_reg() argument
    [all …]

D | i915_gem_evict.c |
    83  struct i915_vma *vma,  in mark_free() argument
    87  if (i915_vma_is_pinned(vma))  in mark_free()
    90  if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))  in mark_free()
    93  list_add(&vma->evict_link, unwind);  in mark_free()
    94  return drm_mm_scan_add_block(scan, &vma->node);  in mark_free()
    135  struct i915_vma *vma, *next;  in i915_gem_evict_something() local
    180  list_for_each_entry(vma, *phase, vm_link)  in i915_gem_evict_something()
    181  if (mark_free(&scan, vma, flags, &eviction_list))  in i915_gem_evict_something()
    186  list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {  in i915_gem_evict_something()
    187  ret = drm_mm_scan_remove_block(&scan, &vma->node);  in i915_gem_evict_something()
    [all …]

/Linux-v4.19/mm/

D | mmap.c |
    75  struct vm_area_struct *vma, struct vm_area_struct *prev,
    126  void vma_set_page_prot(struct vm_area_struct *vma)  in vma_set_page_prot() argument
    128  unsigned long vm_flags = vma->vm_flags;  in vma_set_page_prot()
    131  vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);  in vma_set_page_prot()
    132  if (vma_wants_writenotify(vma, vm_page_prot)) {  in vma_set_page_prot()
    137  WRITE_ONCE(vma->vm_page_prot, vm_page_prot);  in vma_set_page_prot()
    143  static void __remove_shared_vm_struct(struct vm_area_struct *vma,  in __remove_shared_vm_struct() argument
    146  if (vma->vm_flags & VM_DENYWRITE)  in __remove_shared_vm_struct()
    148  if (vma->vm_flags & VM_SHARED)  in __remove_shared_vm_struct()
    152  vma_interval_tree_remove(vma, &mapping->i_mmap);  in __remove_shared_vm_struct()
    [all …]

D | nommu.c |
    99  struct vm_area_struct *vma;  in kobjsize() local
    101  vma = find_vma(current->mm, (unsigned long)objp);  in kobjsize()
    102  if (vma)  in kobjsize()
    103  return vma->vm_end - vma->vm_start;  in kobjsize()
    118  struct vm_area_struct *vma;  in __get_user_pages() local
    131  vma = find_vma(mm, start);  in __get_user_pages()
    132  if (!vma)  in __get_user_pages()
    136  if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||  in __get_user_pages()
    137  !(vm_flags & vma->vm_flags))  in __get_user_pages()
    146  vmas[i] = vma;  in __get_user_pages()
    [all …]

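Several of the hits above and below (nommu.c, task_mmu.c) revolve around find_vma(). As a reminder of its contract, which the listing only shows implicitly: it returns the first VMA whose vm_end lies above the address, and that VMA may still start beyond it. The sketch below illustrates the usual caller-side check; the helper name vma_covering() is made up for illustration, and in v4.19 the caller is expected to hold mm->mmap_sem (the predecessor of today's mmap_lock).

#include <linux/mm.h>

/* Illustrative helper (not in the tree): caller must hold mm->mmap_sem. */
static struct vm_area_struct *vma_covering(struct mm_struct *mm,
					   unsigned long addr)
{
	/* find_vma() returns the first VMA with vm_end > addr ... */
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* ... so it may begin above addr, i.e. addr falls in a hole. */
	if (vma && vma->vm_start > addr)
		return NULL;

	return vma;
}
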
D | mremap.c |
    59  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pmd() argument
    84  static void take_rmap_locks(struct vm_area_struct *vma)  in take_rmap_locks() argument
    86  if (vma->vm_file)  in take_rmap_locks()
    87  i_mmap_lock_write(vma->vm_file->f_mapping);  in take_rmap_locks()
    88  if (vma->anon_vma)  in take_rmap_locks()
    89  anon_vma_lock_write(vma->anon_vma);  in take_rmap_locks()
    92  static void drop_rmap_locks(struct vm_area_struct *vma)  in drop_rmap_locks() argument
    94  if (vma->anon_vma)  in drop_rmap_locks()
    95  anon_vma_unlock_write(vma->anon_vma);  in drop_rmap_locks()
    96  if (vma->vm_file)  in drop_rmap_locks()
    [all …]

D | madvise.c |
    55  static long madvise_behavior(struct vm_area_struct *vma,  in madvise_behavior() argument
    59  struct mm_struct *mm = vma->vm_mm;  in madvise_behavior()
    62  unsigned long new_flags = vma->vm_flags;  in madvise_behavior()
    78  if (vma->vm_flags & VM_IO) {  in madvise_behavior()
    86  if (vma->vm_file || vma->vm_flags & VM_SHARED) {  in madvise_behavior()
    99  if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {  in madvise_behavior()
    107  error = ksm_madvise(vma, start, end, behavior, &new_flags);  in madvise_behavior()
    120  error = hugepage_madvise(vma, &new_flags, behavior);  in madvise_behavior()
    133  if (new_flags == vma->vm_flags) {  in madvise_behavior()
    134  *prev = vma;  in madvise_behavior()
    [all …]

D | mprotect.c |
    38  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  in change_pte_range() argument
    42  struct mm_struct *mm = vma->vm_mm;  in change_pte_range()
    62  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in change_pte_range()
    65  if (prot_numa && !(vma->vm_flags & VM_SHARED) &&  in change_pte_range()
    66  atomic_read(&vma->vm_mm->mm_users) == 1)  in change_pte_range()
    69  flush_tlb_batched_pending(vma->vm_mm);  in change_pte_range()
    84  page = vm_normal_page(vma, addr, oldpte);  in change_pte_range()
    89  if (is_cow_mapping(vma->vm_flags) &&  in change_pte_range()
    121  !(vma->vm_flags & VM_SOFTDIRTY))) {  in change_pte_range()
    165  static inline unsigned long change_pmd_range(struct vm_area_struct *vma,  in change_pmd_range() argument
    [all …]

D | memory.c |
    615  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,  in free_pgtables() argument
    618  while (vma) {  in free_pgtables()
    619  struct vm_area_struct *next = vma->vm_next;  in free_pgtables()
    620  unsigned long addr = vma->vm_start;  in free_pgtables()
    626  unlink_anon_vmas(vma);  in free_pgtables()
    627  unlink_file_vma(vma);  in free_pgtables()
    629  if (is_vm_hugetlb_page(vma)) {  in free_pgtables()
    630  hugetlb_free_pgd_range(tlb, addr, vma->vm_end,  in free_pgtables()
    636  while (next && next->vm_start <= vma->vm_end + PMD_SIZE  in free_pgtables()
    638  vma = next;  in free_pgtables()
    [all …]

D | mlock.c |
    375  struct vm_area_struct *vma, struct zone *zone,  in __munlock_pagevec_fill() argument
    386  pte = get_locked_pte(vma->vm_mm, start, &ptl);  in __munlock_pagevec_fill()
    399  page = vm_normal_page(vma, start, *pte);  in __munlock_pagevec_fill()
    445  void munlock_vma_pages_range(struct vm_area_struct *vma,  in munlock_vma_pages_range() argument
    448  vma->vm_flags &= VM_LOCKED_CLEAR_MASK;  in munlock_vma_pages_range()
    465  page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);  in munlock_vma_pages_range()
    497  start = __munlock_pagevec_fill(&pvec, vma,  in munlock_vma_pages_range()
    519  static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,  in mlock_fixup() argument
    522  struct mm_struct *mm = vma->vm_mm;  in mlock_fixup()
    527  vm_flags_t old_flags = vma->vm_flags;  in mlock_fixup()
    [all …]

D | pgtable-generic.c |
    54  int ptep_set_access_flags(struct vm_area_struct *vma,  in ptep_set_access_flags() argument
    60  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    61  flush_tlb_fix_spurious_fault(vma, address);  in ptep_set_access_flags()
    68  int ptep_clear_flush_young(struct vm_area_struct *vma,  in ptep_clear_flush_young() argument
    72  young = ptep_test_and_clear_young(vma, address, ptep);  in ptep_clear_flush_young()
    74  flush_tlb_page(vma, address);  in ptep_clear_flush_young()
    80  pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,  in ptep_clear_flush() argument
    83  struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
    87  flush_tlb_page(vma, address);  in ptep_clear_flush()
    95  int pmdp_set_access_flags(struct vm_area_struct *vma,  in pmdp_set_access_flags() argument
    [all …]

D | rmap.c |
    137  static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link() argument
    141  avc->vma = vma;  in anon_vma_chain_link()
    143  list_add(&avc->same_vma, &vma->anon_vma_chain);  in anon_vma_chain_link()
    175  int __anon_vma_prepare(struct vm_area_struct *vma)  in __anon_vma_prepare() argument
    177  struct mm_struct *mm = vma->vm_mm;  in __anon_vma_prepare()
    187  anon_vma = find_mergeable_anon_vma(vma);  in __anon_vma_prepare()
    199  if (likely(!vma->anon_vma)) {  in __anon_vma_prepare()
    200  vma->anon_vma = anon_vma;  in __anon_vma_prepare()
    201  anon_vma_chain_link(vma, avc, anon_vma);  in __anon_vma_prepare()
    315  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)  in anon_vma_fork() argument
    [all …]

D | pagewalk.c |
    39  if (pmd_none(*pmd) || !walk->vma) {  in walk_pmd_range()
    62  split_huge_pmd(walk->vma, pmd, addr);  in walk_pmd_range()
    84  if (pud_none(*pud) || !walk->vma) {  in walk_pud_range()
    93  spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);  in walk_pud_range()
    104  split_huge_pud(walk->vma, pud, addr);  in walk_pud_range()
    180  struct vm_area_struct *vma = walk->vma;  in walk_hugetlb_range() local
    181  struct hstate *h = hstate_vma(vma);  in walk_hugetlb_range()
    222  struct vm_area_struct *vma = walk->vma;  in walk_page_test() local
    235  if (vma->vm_flags & VM_PFNMAP) {  in walk_page_test()
    248  struct vm_area_struct *vma = walk->vma;  in __walk_page_range() local
    [all …]

/Linux-v4.19/drivers/gpu/drm/

D | drm_vm.c |
    50  struct vm_area_struct *vma;  member
    54  static void drm_vm_open(struct vm_area_struct *vma);
    55  static void drm_vm_close(struct vm_area_struct *vma);
    58  struct vm_area_struct *vma)  in drm_io_prot() argument
    60  pgprot_t tmp = vm_get_page_prot(vma->vm_flags);  in drm_io_prot()
    71  if (efi_range_is_wc(vma->vm_start, vma->vm_end -  in drm_io_prot()
    72  vma->vm_start))  in drm_io_prot()
    82  static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)  in drm_dma_prot() argument
    84  pgprot_t tmp = vm_get_page_prot(vma->vm_flags);  in drm_dma_prot()
    105  struct vm_area_struct *vma = vmf->vma;  in drm_vm_fault() local
    [all …]

/Linux-v4.19/drivers/gpu/drm/nouveau/

D | nouveau_vmm.c |
    28  nouveau_vma_unmap(struct nouveau_vma *vma)  in nouveau_vma_unmap() argument
    30  if (vma->mem) {  in nouveau_vma_unmap()
    31  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);  in nouveau_vma_unmap()
    32  vma->mem = NULL;  in nouveau_vma_unmap()
    37  nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)  in nouveau_vma_map() argument
    39  struct nvif_vma tmp = { .addr = vma->addr };  in nouveau_vma_map()
    40  int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);  in nouveau_vma_map()
    43  vma->mem = mem;  in nouveau_vma_map()
    50  struct nouveau_vma *vma;  in nouveau_vma_find() local
    52  list_for_each_entry(vma, &nvbo->vma_list, head) {  in nouveau_vma_find()
    [all …]

/Linux-v4.19/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

D | uvmm.c |
    53  struct nvkm_vma *vma;  in nvkm_uvmm_mthd_unmap() local
    63  vma = nvkm_vmm_node_search(vmm, addr);  in nvkm_uvmm_mthd_unmap()
    64  if (ret = -ENOENT, !vma || vma->addr != addr) {  in nvkm_uvmm_mthd_unmap()
    66  addr, vma ? vma->addr : ~0ULL);  in nvkm_uvmm_mthd_unmap()
    70  if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {  in nvkm_uvmm_mthd_unmap()
    72  vma->user, !client->super, vma->busy);  in nvkm_uvmm_mthd_unmap()
    76  if (ret = -EINVAL, !vma->memory) {  in nvkm_uvmm_mthd_unmap()
    81  nvkm_vmm_unmap_locked(vmm, vma);  in nvkm_uvmm_mthd_unmap()
    97  struct nvkm_vma *vma;  in nvkm_uvmm_mthd_map() local
    116  if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {  in nvkm_uvmm_mthd_map()
    [all …]

D | vmm.c |
    737  struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);  in nvkm_vma_new() local
    738  if (vma) {  in nvkm_vma_new()
    739  vma->addr = addr;  in nvkm_vma_new()
    740  vma->size = size;  in nvkm_vma_new()
    741  vma->page = NVKM_VMA_PAGE_NONE;  in nvkm_vma_new()
    742  vma->refd = NVKM_VMA_PAGE_NONE;  in nvkm_vma_new()
    744  return vma;  in nvkm_vma_new()
    748  nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)  in nvkm_vma_tail() argument
    752  BUG_ON(vma->size == tail);  in nvkm_vma_tail()
    754  if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))  in nvkm_vma_tail()
    [all …]

/Linux-v4.19/drivers/pci/

D | mmap.c |
    23  struct vm_area_struct *vma,  in pci_mmap_page_range() argument
    31  vma->vm_pgoff -= start >> PAGE_SHIFT;  in pci_mmap_page_range()
    32  return pci_mmap_resource_range(pdev, bar, vma, mmap_state,  in pci_mmap_page_range()
    44  struct vm_area_struct *vma,  in pci_mmap_resource_range() argument
    51  if (vma->vm_pgoff + vma_pages(vma) > size)  in pci_mmap_resource_range()
    55  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);  in pci_mmap_resource_range()
    57  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);  in pci_mmap_resource_range()
    60  ret = pci_iobar_pfn(pdev, bar, vma);  in pci_mmap_resource_range()
    64  vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);  in pci_mmap_resource_range()
    66  vma->vm_ops = &pci_phys_vm_ops;  in pci_mmap_resource_range()
    [all …]

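For context, the pci_mmap_resource_range() path above is commonly exercised from userspace by mapping a BAR through the sysfs resourceN files. The sketch below is only an illustration of that userspace side; the device address 0000:00:02.0 and the one-page length are placeholders, not values taken from the listing.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder device/BAR path; substitute a real one. */
	const char *path = "/sys/bus/pci/devices/0000:00:02.0/resource0";
	size_t len = 4096;			/* map one page of the BAR */
	int fd = open(path, O_RDWR | O_SYNC);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *bar = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Device registers are now accessible through *bar ... */
	munmap(bar, len);
	close(fd);
	return 0;
}
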
/Linux-v4.19/drivers/gpu/drm/i915/selftests/

D | i915_vma.c |
    32  static bool assert_vma(struct i915_vma *vma,  in assert_vma() argument
    38  if (vma->vm != &ctx->ppgtt->vm) {  in assert_vma()
    43  if (vma->size != obj->base.size) {  in assert_vma()
    45  vma->size, obj->base.size);  in assert_vma()
    49  if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {  in assert_vma()
    51  vma->ggtt_view.type);  in assert_vma()
    63  struct i915_vma *vma;  in checked_vma_instance() local
    66  vma = i915_vma_instance(obj, vm, view);  in checked_vma_instance()
    67  if (IS_ERR(vma))  in checked_vma_instance()
    68  return vma;  in checked_vma_instance()
    [all …]

D | i915_gem_gtt.c |
    328  struct i915_vma *vma;  in close_object_list() local
    330  vma = i915_vma_instance(obj, vm, NULL);  in close_object_list()
    331  if (!IS_ERR(vma))  in close_object_list()
    332  ignored = i915_vma_unbind(vma);  in close_object_list()
    334  if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))  in close_object_list()
    335  i915_vma_close(vma);  in close_object_list()
    353  struct i915_vma *vma;  in fill_hole() local
    391  vma = i915_vma_instance(obj, vm, NULL);  in fill_hole()
    392  if (IS_ERR(vma))  in fill_hole()
    401  err = i915_vma_pin(vma, 0, 0, offset | flags);  in fill_hole()
    [all …]

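The selftest hits above trace the usual i915 binding sequence: i915_vma_instance() to look up (or create) the VMA for an object in an address space, i915_vma_pin() to bind it, and i915_vma_unpin() when finished. The condensed sketch below is not taken from the tree; bind_object() is a made-up name, error handling is trimmed, and obj/vm are assumed to come from the caller.

/* Sketch of the lookup/pin/unpin sequence seen in the selftests above. */
static int bind_object(struct drm_i915_gem_object *obj,
		       struct i915_address_space *vm)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);	/* NULL view: normal mapping */
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* size/alignment of 0 use the defaults; PIN_USER binds into a ppGTT,
	 * PIN_GLOBAL would target the GGTT instead. */
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* vma->node.start now holds the GPU virtual address ... */

	i915_vma_unpin(vma);
	return 0;
}
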
D | huge_pages.c |
    339  static int igt_check_page_sizes(struct i915_vma *vma)  in igt_check_page_sizes() argument
    341  struct drm_i915_private *i915 = vma->vm->i915;  in igt_check_page_sizes()
    343  struct drm_i915_gem_object *obj = vma->obj;  in igt_check_page_sizes()
    346  if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {  in igt_check_page_sizes()
    348  vma->page_sizes.sg & ~supported, supported);  in igt_check_page_sizes()
    352  if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {  in igt_check_page_sizes()
    354  vma->page_sizes.gtt & ~supported, supported);  in igt_check_page_sizes()
    358  if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {  in igt_check_page_sizes()
    360  vma->page_sizes.phys, obj->mm.page_sizes.phys);  in igt_check_page_sizes()
    364  if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {  in igt_check_page_sizes()
    [all …]

/Linux-v4.19/include/linux/

D | huge_mm.h |
    13  struct vm_area_struct *vma);
    17  struct vm_area_struct *vma);
    28  extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
    33  struct vm_area_struct *vma,
    36  struct vm_area_struct *vma,
    39  struct vm_area_struct *vma,
    41  extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
    44  extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
    47  extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
    50  vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
    [all …]

/Linux-v4.19/fs/proc/

D | task_mmu.c |
    136  m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)  in m_next_vma() argument
    138  if (vma == priv->tail_vma)  in m_next_vma()
    140  return vma->vm_next ?: priv->tail_vma;  in m_next_vma()
    143  static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)  in m_cache_vma() argument
    146  m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;  in m_cache_vma()
    154  struct vm_area_struct *vma;  in m_start() local
    174  vma = find_vma(mm, last_addr - 1);  in m_start()
    175  if (vma && vma->vm_start <= last_addr)  in m_start()
    176  vma = m_next_vma(priv, vma);  in m_start()
    177  if (vma)  in m_start()
    [all …]

/Linux-v4.19/arch/powerpc/include/asm/book3s/64/

D | tlbflush.h |
    50  static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,  in flush_pmd_tlb_range() argument
    54  return radix__flush_pmd_tlb_range(vma, start, end);  in flush_pmd_tlb_range()
    55  return hash__flush_tlb_range(vma, start, end);  in flush_pmd_tlb_range()
    59  static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,  in flush_hugetlb_tlb_range() argument
    64  return radix__flush_hugetlb_tlb_range(vma, start, end);  in flush_hugetlb_tlb_range()
    65  return hash__flush_tlb_range(vma, start, end);  in flush_hugetlb_tlb_range()
    68  static inline void flush_tlb_range(struct vm_area_struct *vma,  in flush_tlb_range() argument
    72  return radix__flush_tlb_range(vma, start, end);  in flush_tlb_range()
    73  return hash__flush_tlb_range(vma, start, end);  in flush_tlb_range()
    91  static inline void local_flush_tlb_page(struct vm_area_struct *vma,  in local_flush_tlb_page() argument
    [all …]

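The truncated tlbflush.h hits omit the branch that selects between the two book3s/64 MMU back ends. Presumably each helper dispatches on radix_enabled(), so the overall shape is roughly the sketch below; the guard is a reconstruction under that assumption, not a quote of the file.

/* Reconstruction of the dispatch pattern implied by the hits above;
 * the radix_enabled() guard is an assumption, not copied from the file. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}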