Lines matching refs: vma (identifier cross-reference over include/linux/huge_mm.h; continuation lines of multi-line statements are not shown unless they themselves mention vma)
13 struct vm_area_struct *vma);
17 struct vm_area_struct *vma);
28 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
33 struct vm_area_struct *vma,
36 struct vm_area_struct *vma,
39 struct vm_area_struct *vma,
41 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
44 extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
47 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
90 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
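The per-line match format truncates these multi-line prototypes. As one hedged reconstruction (based on huge_mm.h of roughly this kernel generation, not recoverable from the truncated hits alone), the change_huge_pmd() declaration at line 47 reads in full:

extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);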
98 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) in __transparent_hugepage_enabled() argument
100 if (vma->vm_flags & VM_NOHUGEPAGE) in __transparent_hugepage_enabled()
103 if (is_vma_temporary_stack(vma)) in __transparent_hugepage_enabled()
106 if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) in __transparent_hugepage_enabled()
117 if (vma_is_dax(vma)) in __transparent_hugepage_enabled()
122 return !!(vma->vm_flags & VM_HUGEPAGE); in __transparent_hugepage_enabled()
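Joining the hits at lines 98-122 back together gives the whole helper. The vma checks below are verbatim from the matches; the sysfs-flag tests in between contain no vma reference and so were not matched, and are filled in here from this kernel generation's header as an approximation:

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/* Per-VMA and per-process opt-outs come first. */
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	/* Global "always" mode enables THP for every remaining vma. */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	/* DAX mappings always try huge pages when the kernel supports them. */
	if (vma_is_dax(vma))
		return true;

	/* "madvise" mode requires an explicit MADV_HUGEPAGE on this vma. */
	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}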
127 bool transparent_hugepage_enabled(struct vm_area_struct *vma);
131 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, in transhuge_vma_suitable() argument
135 if (!vma_is_anonymous(vma)) { in transhuge_vma_suitable()
136 if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != in transhuge_vma_suitable()
137 (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) in transhuge_vma_suitable()
141 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) in transhuge_vma_suitable()
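The hits at lines 131-141 cover nearly all of transhuge_vma_suitable(): a file-backed vma must be aligned so that virtual address and file offset agree modulo the huge-page size, and the huge-page-aligned candidate address haddr must lie entirely inside the vma. Filled out (the bare return statements hold no vma and were not matched, so they are inferred):

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Anonymous vmas have no file offset that needs to stay aligned. */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
				(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	/* The PMD-sized range starting at haddr must fit inside the vma. */
	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}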
172 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
185 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
188 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
199 extern int hugepage_madvise(struct vm_area_struct *vma,
201 extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
206 struct vm_area_struct *vma);
208 struct vm_area_struct *vma);
217 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
219 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); in pmd_trans_huge_lock()
221 return __pmd_trans_huge_lock(pmd, vma); in pmd_trans_huge_lock()
226 struct vm_area_struct *vma) in pud_trans_huge_lock() argument
228 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); in pud_trans_huge_lock()
230 return __pud_trans_huge_lock(pud, vma); in pud_trans_huge_lock()
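The two lock helpers at lines 217-230 share one pattern: assert that mmap_sem is held (this header predates the mmap_sem to mmap_lock rename), do a cheap unlocked check, and take the page-table spinlock via the double-underscore slow path only when the entry really is huge or devmap. The pmd_trans_huge()/pmd_devmap() test lines carry no vma and were not matched; they are restored below from this header's code of the same era:

static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}

static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

A non-NULL return hands the caller the held spinlock, which it must spin_unlock(); NULL means the entry was not huge and the caller falls back to the normal pte path.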
241 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
243 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
295 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) in __transparent_hugepage_enabled() argument
300 static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) in transparent_hugepage_enabled() argument
305 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, in transhuge_vma_suitable() argument
336 static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
338 static inline void split_huge_pmd_address(struct vm_area_struct *vma, in split_huge_pmd_address() argument
344 static inline int hugepage_madvise(struct vm_area_struct *vma, in hugepage_madvise() argument
350 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
361 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
366 struct vm_area_struct *vma) in pud_trans_huge_lock() argument
392 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, in follow_devmap_pmd() argument
398 static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, in follow_devmap_pud() argument
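Lines 295 onward are the !CONFIG_TRANSPARENT_HUGEPAGE stubs: each keeps the same signature as the real function but compiles to a trivial body, so call sites need no #ifdef. A representative pair, with the bodies inferred (they mention no vma and were not matched):

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}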