
Searched refs:vma (Results 1 – 25 of 971) sorted by relevance


/Linux-v5.4/drivers/gpu/drm/i915/
i915_vma.c:48 void i915_vma_free(struct i915_vma *vma) in i915_vma_free() argument
50 return kmem_cache_free(global.slab_vmas, vma); in i915_vma_free()
57 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
63 if (!vma->node.stack) { in vma_print_allocator()
65 vma->node.start, vma->node.size, reason); in vma_print_allocator()
69 nr_entries = stack_depot_fetch(vma->node.stack, &entries); in vma_print_allocator()
72 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
77 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
103 struct i915_vma *vma; in vma_create() local
109 vma = i915_vma_alloc(); in vma_create()
[all …]
i915_vma.h:156 static inline bool i915_vma_is_active(const struct i915_vma *vma) in i915_vma_is_active() argument
158 return !i915_active_is_idle(&vma->active); in i915_vma_is_active()
161 int __must_check i915_vma_move_to_active(struct i915_vma *vma,
165 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) in i915_vma_is_ggtt() argument
167 return vma->flags & I915_VMA_GGTT; in i915_vma_is_ggtt()
170 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma) in i915_vma_has_ggtt_write() argument
172 return vma->flags & I915_VMA_GGTT_WRITE; in i915_vma_has_ggtt_write()
175 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma) in i915_vma_set_ggtt_write() argument
177 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in i915_vma_set_ggtt_write()
178 vma->flags |= I915_VMA_GGTT_WRITE; in i915_vma_set_ggtt_write()
[all …]
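
The i915_vma.h hits above are the inline accessors i915 layers over vma->flags: a predicate per flag, plus a setter that asserts its precondition. A minimal userspace sketch of that accessor pattern (the struct, bit values, and names here are hypothetical, not i915's):

    #include <assert.h>
    #include <stdbool.h>

    #define VMA_GGTT       (1u << 0)
    #define VMA_GGTT_WRITE (1u << 1)

    struct toy_vma { unsigned int flags; };

    static inline bool toy_vma_is_ggtt(const struct toy_vma *vma)
    {
        return vma->flags & VMA_GGTT;
    }

    static inline void toy_vma_set_ggtt_write(struct toy_vma *vma)
    {
        assert(toy_vma_is_ggtt(vma)); /* mirrors the GEM_BUG_ON() guard */
        vma->flags |= VMA_GGTT_WRITE;
    }
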
i915_gem_evict.c:57 struct i915_vma *vma, in mark_free() argument
61 if (i915_vma_is_pinned(vma)) in mark_free()
64 list_add(&vma->evict_link, unwind); in mark_free()
65 return drm_mm_scan_add_block(scan, &vma->node); in mark_free()
101 struct i915_vma *vma, *next; in i915_gem_evict_something() local
142 list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) { in i915_gem_evict_something()
158 if (i915_vma_is_active(vma)) { in i915_gem_evict_something()
159 if (vma == active) { in i915_gem_evict_something()
168 active = vma; in i915_gem_evict_something()
170 list_move_tail(&vma->vm_link, &vm->bound_list); in i915_gem_evict_something()
[all …]
i915_gem_fence_reg.c:63 struct i915_vma *vma) in i965_write_fence_reg() argument
81 if (vma) { in i965_write_fence_reg()
82 unsigned int stride = i915_gem_object_get_stride(vma->obj); in i965_write_fence_reg()
84 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); in i965_write_fence_reg()
85 GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE)); in i965_write_fence_reg()
86 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE)); in i965_write_fence_reg()
89 val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32; in i965_write_fence_reg()
90 val |= vma->node.start; in i965_write_fence_reg()
92 if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y) in i965_write_fence_reg()
120 struct i915_vma *vma) in i915_write_fence_reg() argument
[all …]
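
i965_write_fence_reg() above packs one fence into a 64-bit register value: the last fence page in the high 32 bits, the first page in the low bits, plus a tiling flag. A hedged sketch of that packing (4 KiB fence pages assumed; the tiling bit position is an assumption):

    #include <stdint.h>

    #define FENCE_PAGE   4096ULL
    #define FENCE_Y_TILE (1ULL << 1) /* hypothetical Y-tiling bit */

    static uint64_t pack_fence(uint64_t start, uint64_t size, int y_tiled)
    {
        uint64_t val = (start + size - FENCE_PAGE) << 32; /* last page */
        val |= start;                                     /* first page */
        if (y_tiled)
            val |= FENCE_Y_TILE;
        return val;
    }
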
/Linux-v5.4/mm/
mmap.c:77 struct vm_area_struct *vma, struct vm_area_struct *prev,
128 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
130 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
133 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
134 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
139 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
145 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
148 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
150 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
154 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
[all …]
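
vma_set_page_prot() above recomputes the protection from a snapshot of vm_flags and then publishes it with a single WRITE_ONCE() store, so lockless readers never observe a half-updated value. A userspace sketch of that compute-then-publish shape (a C11 relaxed atomic stands in for WRITE_ONCE(); the derivation is hypothetical):

    #include <stdatomic.h>

    struct prot_vma {
        unsigned long vm_flags;
        _Atomic unsigned long vm_page_prot; /* may be read without the lock */
    };

    static unsigned long prot_from_flags(unsigned long flags)
    {
        return flags & 0x7; /* hypothetical rwx derivation */
    }

    static void toy_set_page_prot(struct prot_vma *vma)
    {
        unsigned long prot = prot_from_flags(vma->vm_flags);
        /* one store, as WRITE_ONCE(vma->vm_page_prot, ...) does */
        atomic_store_explicit(&vma->vm_page_prot, prot, memory_order_relaxed);
    }
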
nommu.c:100 struct vm_area_struct *vma; in kobjsize() local
102 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
103 if (vma) in kobjsize()
104 return vma->vm_end - vma->vm_start; in kobjsize()
124 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
127 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
164 struct vm_area_struct *vma; in vmalloc_user() local
167 vma = find_vma(current->mm, (unsigned long)ret); in vmalloc_user()
168 if (vma) in vmalloc_user()
169 vma->vm_flags |= VM_USERMAP; in vmalloc_user()
[all …]
mremap.c:59 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
84 static void take_rmap_locks(struct vm_area_struct *vma) in take_rmap_locks() argument
86 if (vma->vm_file) in take_rmap_locks()
87 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
88 if (vma->anon_vma) in take_rmap_locks()
89 anon_vma_lock_write(vma->anon_vma); in take_rmap_locks()
92 static void drop_rmap_locks(struct vm_area_struct *vma) in drop_rmap_locks() argument
94 if (vma->anon_vma) in drop_rmap_locks()
95 anon_vma_unlock_write(vma->anon_vma); in drop_rmap_locks()
96 if (vma->vm_file) in drop_rmap_locks()
[all …]
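
take_rmap_locks()/drop_rmap_locks() above are a symmetric pair: take the file rmap lock then the anon rmap lock, release them in the reverse order, and skip whichever lock the vma does not have. A pthreads sketch of that discipline (the struct and lock types are stand-ins for the kernel's):

    #include <pthread.h>

    struct rmap_vma {
        pthread_mutex_t *file_lock; /* i_mmap-style lock, may be NULL */
        pthread_mutex_t *anon_lock; /* anon_vma-style lock, may be NULL */
    };

    static void take_rmap_locks(struct rmap_vma *vma)
    {
        if (vma->file_lock)
            pthread_mutex_lock(vma->file_lock);
        if (vma->anon_lock)
            pthread_mutex_lock(vma->anon_lock);
    }

    static void drop_rmap_locks(struct rmap_vma *vma)
    {
        if (vma->anon_lock)
            pthread_mutex_unlock(vma->anon_lock);
        if (vma->file_lock)
            pthread_mutex_unlock(vma->file_lock);
    }
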
madvise.c:65 static long madvise_behavior(struct vm_area_struct *vma, in madvise_behavior() argument
69 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
72 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
88 if (vma->vm_flags & VM_IO) { in madvise_behavior()
96 if (vma->vm_file || vma->vm_flags & VM_SHARED) { in madvise_behavior()
109 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) { in madvise_behavior()
117 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
123 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_behavior()
129 if (new_flags == vma->vm_flags) { in madvise_behavior()
130 *prev = vma; in madvise_behavior()
[all …]
mprotect.c:38 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument
61 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
64 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
65 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
68 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
83 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
88 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
112 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
120 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
123 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
[all …]
memory.c:370 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
373 while (vma) { in free_pgtables()
374 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
375 unsigned long addr = vma->vm_start; in free_pgtables()
381 unlink_anon_vmas(vma); in free_pgtables()
382 unlink_file_vma(vma); in free_pgtables()
384 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
385 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
391 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
393 vma = next; in free_pgtables()
[all …]
mlock.c:375 struct vm_area_struct *vma, struct zone *zone, in __munlock_pagevec_fill() argument
386 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
399 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
445 void munlock_vma_pages_range(struct vm_area_struct *vma, in munlock_vma_pages_range() argument
448 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in munlock_vma_pages_range()
465 page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); in munlock_vma_pages_range()
497 start = __munlock_pagevec_fill(&pvec, vma, in munlock_vma_pages_range()
519 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument
522 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
527 vm_flags_t old_flags = vma->vm_flags; in mlock_fixup()
[all …]
huge_memory.c:65 bool transparent_hugepage_enabled(struct vm_area_struct *vma) in transparent_hugepage_enabled() argument
68 unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE; in transparent_hugepage_enabled()
70 if (!transhuge_vma_suitable(vma, addr)) in transparent_hugepage_enabled()
72 if (vma_is_anonymous(vma)) in transparent_hugepage_enabled()
73 return __transparent_hugepage_enabled(vma); in transparent_hugepage_enabled()
74 if (vma_is_shmem(vma)) in transparent_hugepage_enabled()
75 return shmem_huge_enabled(vma); in transparent_hugepage_enabled()
492 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
494 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
576 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page() local
[all …]
pgtable-generic.c:55 int ptep_set_access_flags(struct vm_area_struct *vma, in ptep_set_access_flags() argument
61 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
62 flush_tlb_fix_spurious_fault(vma, address); in ptep_set_access_flags()
69 int ptep_clear_flush_young(struct vm_area_struct *vma, in ptep_clear_flush_young() argument
73 young = ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
75 flush_tlb_page(vma, address); in ptep_clear_flush_young()
81 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, in ptep_clear_flush() argument
84 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
88 flush_tlb_page(vma, address); in ptep_clear_flush()
96 int pmdp_set_access_flags(struct vm_area_struct *vma, in pmdp_set_access_flags() argument
[all …]
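
ptep_clear_flush_young() above is the classic test-and-clear-then-flush shape: clear the accessed bit and only pay for a TLB flush when the bit was actually set. A C11 sketch of that shape with an atomic word standing in for the PTE (the bit position and the flush are stand-ins):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define PTE_YOUNG (1u << 5) /* hypothetical accessed bit */

    static void flush_tlb_page_stub(void) { /* stand-in for the real flush */ }

    static bool clear_flush_young(_Atomic unsigned int *pte)
    {
        unsigned int old = atomic_fetch_and(pte, ~PTE_YOUNG);
        bool young = old & PTE_YOUNG;

        if (young)
            flush_tlb_page_stub(); /* flush only when the bit was set */
        return young;
    }
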
/Linux-v5.4/drivers/gpu/drm/
drm_vm.c:62 struct vm_area_struct *vma; member
66 static void drm_vm_open(struct vm_area_struct *vma);
67 static void drm_vm_close(struct vm_area_struct *vma);
70 struct vm_area_struct *vma) in drm_io_prot() argument
72 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
84 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
85 vma->vm_start)) in drm_io_prot()
95 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument
97 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
118 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault() local
[all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/
nouveau_vmm.c:29 nouveau_vma_unmap(struct nouveau_vma *vma) in nouveau_vma_unmap() argument
31 if (vma->mem) { in nouveau_vma_unmap()
32 nvif_vmm_unmap(&vma->vmm->vmm, vma->addr); in nouveau_vma_unmap()
33 vma->mem = NULL; in nouveau_vma_unmap()
38 nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem) in nouveau_vma_map() argument
40 struct nvif_vma tmp = { .addr = vma->addr }; in nouveau_vma_map()
41 int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp); in nouveau_vma_map()
44 vma->mem = mem; in nouveau_vma_map()
51 struct nouveau_vma *vma; in nouveau_vma_find() local
53 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_vma_find()
[all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c:750 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); in nvkm_vma_new() local
751 if (vma) { in nvkm_vma_new()
752 vma->addr = addr; in nvkm_vma_new()
753 vma->size = size; in nvkm_vma_new()
754 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
755 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
757 return vma; in nvkm_vma_new()
761 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) in nvkm_vma_tail() argument
765 BUG_ON(vma->size == tail); in nvkm_vma_tail()
767 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail))) in nvkm_vma_tail()
[all …]
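
nvkm_vma_tail() above splits an {addr, size} node by carving the last `tail` bytes into a new node. A sketch of that split on a plain struct (the allocation and the caller's list linkage are assumptions):

    #include <stdlib.h>

    struct toy_range { unsigned long long addr, size; };

    static struct toy_range *toy_range_tail(struct toy_range *r,
                                            unsigned long long tail)
    {
        struct toy_range *new_r;

        if (tail == 0 || tail >= r->size) /* cf. BUG_ON(vma->size == tail) */
            return NULL;
        new_r = malloc(sizeof(*new_r));
        if (!new_r)
            return NULL;
        new_r->addr = r->addr + (r->size - tail); /* tail piece */
        new_r->size = tail;
        r->size -= tail;                          /* shrink the head */
        return new_r;
    }
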
uvmm.c:116 struct nvkm_vma *vma; in nvkm_uvmm_mthd_unmap() local
126 vma = nvkm_vmm_node_search(vmm, addr); in nvkm_uvmm_mthd_unmap()
127 if (ret = -ENOENT, !vma || vma->addr != addr) { in nvkm_uvmm_mthd_unmap()
129 addr, vma ? vma->addr : ~0ULL); in nvkm_uvmm_mthd_unmap()
133 if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { in nvkm_uvmm_mthd_unmap()
135 vma->user, !client->super, vma->busy); in nvkm_uvmm_mthd_unmap()
139 if (ret = -EINVAL, !vma->memory) { in nvkm_uvmm_mthd_unmap()
144 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_uvmm_mthd_unmap()
160 struct nvkm_vma *vma; in nvkm_uvmm_mthd_map() local
179 if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) { in nvkm_uvmm_mthd_map()
[all …]
/Linux-v5.4/drivers/pci/
mmap.c:23 struct vm_area_struct *vma, in pci_mmap_page_range() argument
31 vma->vm_pgoff -= start >> PAGE_SHIFT; in pci_mmap_page_range()
32 return pci_mmap_resource_range(pdev, bar, vma, mmap_state, in pci_mmap_page_range()
44 struct vm_area_struct *vma, in pci_mmap_resource_range() argument
51 if (vma->vm_pgoff + vma_pages(vma) > size) in pci_mmap_resource_range()
55 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_resource_range()
57 vma->vm_page_prot = pgprot_device(vma->vm_page_prot); in pci_mmap_resource_range()
60 ret = pci_iobar_pfn(pdev, bar, vma); in pci_mmap_resource_range()
64 vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); in pci_mmap_resource_range()
66 vma->vm_ops = &pci_phys_vm_ops; in pci_mmap_resource_range()
[all …]
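
pci_mmap_resource_range() above rejects a mapping when vm_pgoff plus the vma's page count runs past the BAR. A standalone sketch of that bounds check (4 KiB pages assumed; names hypothetical):

    #include <stdbool.h>
    #include <stddef.h>

    #define TOY_PAGE_SHIFT 12
    #define TOY_PAGE_SIZE  (1UL << TOY_PAGE_SHIFT)

    static bool mmap_fits_bar(unsigned long pgoff, size_t len_bytes,
                              size_t bar_bytes)
    {
        size_t pages     = (len_bytes + TOY_PAGE_SIZE - 1) >> TOY_PAGE_SHIFT;
        size_t bar_pages = bar_bytes >> TOY_PAGE_SHIFT;

        /* cf. vma->vm_pgoff + vma_pages(vma) > size */
        return pgoff + pages <= bar_pages;
    }
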
/Linux-v5.4/include/linux/
huge_mm.h:13 struct vm_area_struct *vma);
17 struct vm_area_struct *vma);
28 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
33 struct vm_area_struct *vma,
36 struct vm_area_struct *vma,
39 struct vm_area_struct *vma,
41 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
44 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
47 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
90 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
[all …]
/Linux-v5.4/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c:328 struct i915_vma *vma; in close_object_list() local
330 vma = i915_vma_instance(obj, vm, NULL); in close_object_list()
331 if (!IS_ERR(vma)) in close_object_list()
332 ignored = i915_vma_unbind(vma); in close_object_list()
334 if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma)) in close_object_list()
335 i915_vma_close(vma); in close_object_list()
353 struct i915_vma *vma; in fill_hole() local
391 vma = i915_vma_instance(obj, vm, NULL); in fill_hole()
392 if (IS_ERR(vma)) in fill_hole()
401 err = i915_vma_pin(vma, 0, 0, offset | flags); in fill_hole()
[all …]
i915_vma.c:35 static bool assert_vma(struct i915_vma *vma, in assert_vma() argument
41 if (vma->vm != ctx->vm) { in assert_vma()
46 if (vma->size != obj->base.size) { in assert_vma()
48 vma->size, obj->base.size); in assert_vma()
52 if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) { in assert_vma()
54 vma->ggtt_view.type); in assert_vma()
66 struct i915_vma *vma; in checked_vma_instance() local
69 vma = i915_vma_instance(obj, vm, view); in checked_vma_instance()
70 if (IS_ERR(vma)) in checked_vma_instance()
71 return vma; in checked_vma_instance()
[all …]
/Linux-v5.4/drivers/gpu/drm/msm/
msm_gem_vma.c:32 struct msm_gem_vma *vma) in msm_gem_purge_vma() argument
34 unsigned size = vma->node.size << PAGE_SHIFT; in msm_gem_purge_vma()
37 if (WARN_ON(vma->inuse > 0)) in msm_gem_purge_vma()
41 if (!vma->mapped) in msm_gem_purge_vma()
45 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); in msm_gem_purge_vma()
47 vma->mapped = false; in msm_gem_purge_vma()
52 struct msm_gem_vma *vma) in msm_gem_unmap_vma() argument
54 if (!WARN_ON(!vma->iova)) in msm_gem_unmap_vma()
55 vma->inuse--; in msm_gem_unmap_vma()
60 struct msm_gem_vma *vma, int prot, in msm_gem_map_vma() argument
[all …]
/Linux-v5.4/fs/proc/
task_mmu.c:136 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) in m_next_vma() argument
138 if (vma == priv->tail_vma) in m_next_vma()
140 return vma->vm_next ?: priv->tail_vma; in m_next_vma()
143 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) in m_cache_vma() argument
146 m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL; in m_cache_vma()
154 struct vm_area_struct *vma; in m_start() local
178 vma = find_vma(mm, last_addr - 1); in m_start()
179 if (vma && vma->vm_start <= last_addr) in m_start()
180 vma = m_next_vma(priv, vma); in m_start()
181 if (vma) in m_start()
[all …]
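
m_next_vma() above advances through the mm's vma list and, when the list runs out, falls back to the gate/tail sentinel (the kernel spells the fallback with GCC's `?:` extension). A sketch of that walk on a generic singly linked list (types hypothetical):

    #include <stddef.h>

    struct toy_node { struct toy_node *next; };

    static struct toy_node *toy_next(struct toy_node *cur,
                                     struct toy_node *tail)
    {
        if (cur == tail) /* already at the sentinel: iteration is done */
            return NULL;
        return cur->next ? cur->next : tail;
    }
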
/Linux-v5.4/arch/powerpc/include/asm/book3s/64/
tlbflush.h:50 static inline void flush_pmd_tlb_range(struct vm_area_struct *vma, in flush_pmd_tlb_range() argument
54 return radix__flush_pmd_tlb_range(vma, start, end); in flush_pmd_tlb_range()
55 return hash__flush_tlb_range(vma, start, end); in flush_pmd_tlb_range()
59 static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, in flush_hugetlb_tlb_range() argument
64 return radix__flush_hugetlb_tlb_range(vma, start, end); in flush_hugetlb_tlb_range()
65 return hash__flush_tlb_range(vma, start, end); in flush_hugetlb_tlb_range()
68 static inline void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
72 return radix__flush_tlb_range(vma, start, end); in flush_tlb_range()
73 return hash__flush_tlb_range(vma, start, end); in flush_tlb_range()
91 static inline void local_flush_tlb_page(struct vm_area_struct *vma, in local_flush_tlb_page() argument
[all …]
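
The tlbflush.h wrappers above pick between the radix and hash MMU implementations at run time behind one stable entry point. A sketch of that dispatch shape (the predicate and both back ends are stand-ins):

    #include <stdbool.h>

    static bool radix_enabled_stub(void) { return true; } /* stand-in */

    static void radix_flush(unsigned long s, unsigned long e) { (void)s; (void)e; }
    static void hash_flush(unsigned long s, unsigned long e)  { (void)s; (void)e; }

    static void flush_tlb_range_stub(unsigned long start, unsigned long end)
    {
        if (radix_enabled_stub())
            radix_flush(start, end);
        else
            hash_flush(start, end);
    }
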
/Linux-v5.4/drivers/gpu/drm/i915/gem/selftests/
huge_pages.c:331 static int igt_check_page_sizes(struct i915_vma *vma) in igt_check_page_sizes() argument
333 struct drm_i915_private *i915 = vma->vm->i915; in igt_check_page_sizes()
335 struct drm_i915_gem_object *obj = vma->obj; in igt_check_page_sizes()
338 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) { in igt_check_page_sizes()
340 vma->page_sizes.sg & ~supported, supported); in igt_check_page_sizes()
344 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) { in igt_check_page_sizes()
346 vma->page_sizes.gtt & ~supported, supported); in igt_check_page_sizes()
350 if (vma->page_sizes.phys != obj->mm.page_sizes.phys) { in igt_check_page_sizes()
352 vma->page_sizes.phys, obj->mm.page_sizes.phys); in igt_check_page_sizes()
356 if (vma->page_sizes.sg != obj->mm.page_sizes.sg) { in igt_check_page_sizes()
[all …]
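
igt_check_page_sizes() above validates each page-size mask the same way: a value passes only if it sets no bits outside the supported mask. The one-liner behind those checks, as a standalone helper (mask values hypothetical):

    #include <stdbool.h>

    static bool only_supported_bits(unsigned int value, unsigned int supported)
    {
        /* cf. vma->page_sizes.sg & ~supported */
        return (value & ~supported) == 0;
    }
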
