Lines matching refs:vma
(Each hit below shows the source line number, the matching line, and, where available, the enclosing function plus how the symbol is used there, e.g. "argument".)
17 struct vm_area_struct *vma);
28 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
31 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
33 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
35 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
37 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
39 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
59 return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); in vmf_insert_pfn_pmd()
78 return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write); in vmf_insert_pfn_pud()
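The two hits above (lines 59 and 78) are the convenience wrappers: each forwards to its *_prot variant, passing the faulting VMA's default protection (vmf->vma->vm_page_prot). A minimal sketch of a driver-style huge_fault handler built on the PMD wrapper; the handler name and MY_DEV_PHYS_BASE are hypothetical, only vmf_insert_pfn_pmd()'s signature comes from the listing:

/*
 * Hypothetical huge_fault handler: map a PMD-sized device PFN range.
 * vmf_insert_pfn_pmd() applies vmf->vma->vm_page_prot internally,
 * as the line-59 hit shows.
 */
static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	phys_addr_t phys = MY_DEV_PHYS_BASE +		/* hypothetical base */
			   (vmf->pgoff << PAGE_SHIFT);
	pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

	return vmf_insert_pfn_pmd(vmf, pfn, write);
}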
138 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, in transhuge_vma_suitable() argument
144 if (!vma_is_anonymous(vma)) { in transhuge_vma_suitable()
145 if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, in transhuge_vma_suitable()
152 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) in transhuge_vma_suitable()
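Read together, the transhuge_vma_suitable() hits (lines 138-152) describe the suitability test: a file-backed VMA must keep its file offset PMD-aligned relative to vm_start, and the rounded-down huge address must fall entirely inside the VMA. A sketch of an equivalent check, assuming the usual HPAGE_PMD_* constants; an approximation, not the verbatim kernel body:

/* Sketch: can a PMD-sized THP be placed at @addr inside @vma? */
static inline bool thp_addr_suitable_sketch(struct vm_area_struct *vma,
					    unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;

	/* File mappings: the backing file offset must stay PMD-aligned. */
	if (!vma_is_anonymous(vma) &&
	    !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
			HPAGE_PMD_NR))
		return false;

	/* The whole aligned huge page must fit inside the VMA. */
	return haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end;
}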
157 static inline bool file_thp_enabled(struct vm_area_struct *vma) in file_thp_enabled() argument
161 if (!vma->vm_file) in file_thp_enabled()
164 inode = vma->vm_file->f_inode; in file_thp_enabled()
167 (vma->vm_flags & VM_EXEC) && in file_thp_enabled()
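The file_thp_enabled() hits (lines 157-167) gate read-only file-backed THP. A hedged reconstruction of the whole helper; the conditions not matched by the search (the CONFIG_READ_ONLY_THP_FOR_FS check, S_ISREG and the open-for-write test) are filled in from memory of this era of huge_mm.h and may not be verbatim:

static inline bool file_thp_enabled_sketch(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
		return false;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	/* Executable mapping of a regular file nobody has open for write. */
	return S_ISREG(inode->i_mode) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode);
}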
171 bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
192 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
205 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
208 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
219 int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
221 int madvise_collapse(struct vm_area_struct *vma,
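hugepage_madvise() and madvise_collapse() (lines 219-221) are the kernel-side handlers behind madvise(2)'s MADV_HUGEPAGE and MADV_COLLAPSE. A small userspace sketch that exercises both; buffer size and alignment are illustrative:

#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one PMD-sized region */
	void *buf = aligned_alloc(len, len);	/* 2 MiB aligned */

	if (!buf)
		return 1;

	madvise(buf, len, MADV_HUGEPAGE);	/* -> hugepage_madvise() */
#ifdef MADV_COLLAPSE
	madvise(buf, len, MADV_COLLAPSE);	/* -> madvise_collapse(), v6.1+ */
#endif
	free(buf);
	return 0;
}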
224 void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
226 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
227 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
236 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
239 return __pmd_trans_huge_lock(pmd, vma); in pmd_trans_huge_lock()
244 struct vm_area_struct *vma) in pud_trans_huge_lock() argument
247 return __pud_trans_huge_lock(pud, vma); in pud_trans_huge_lock()
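pmd_trans_huge_lock()/pud_trans_huge_lock() (lines 226-247) return the relevant page-table spinlock only when the entry really is huge, and NULL otherwise; the inline wrappers do a cheap check before calling the __-prefixed lock helpers. The usual caller shape in mm code looks roughly like this (the work done under the lock is a placeholder):

static void walk_one_pmd_sketch(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (ptl) {
		/* *pmd is huge and stable while ptl is held. */
		/* ... operate on the huge entry ... */
		spin_unlock(ptl);
		return;
	}
	/* Not huge (or it split under us): fall back to the PTE path. */
}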
261 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
263 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
319 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, in transhuge_vma_suitable() argument
325 static inline bool hugepage_vma_check(struct vm_area_struct *vma, in hugepage_vma_check() argument
356 static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
358 static inline void split_huge_pmd_address(struct vm_area_struct *vma, in split_huge_pmd_address() argument
364 static inline int hugepage_madvise(struct vm_area_struct *vma, in hugepage_madvise() argument
370 static inline int madvise_collapse(struct vm_area_struct *vma, in madvise_collapse() argument
377 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
388 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
393 struct vm_area_struct *vma) in pud_trans_huge_lock() argument
423 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, in follow_devmap_pmd() argument
429 static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, in follow_devmap_pud() argument
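The run of hits from line 319 onward are the !CONFIG_TRANSPARENT_HUGEPAGE stubs: identical signatures with trivial bodies, so call sites stay #ifdef-free. A sketch of the shape (the stub name is altered to mark it as illustrative); with such a stub in place, the pmd_trans_huge_lock() caller sketch shown earlier compiles unchanged and simply always takes the PTE-level path:

static inline spinlock_t *pmd_trans_huge_lock_stub(pmd_t *pmd,
					struct vm_area_struct *vma)
{
	return NULL;	/* no huge PMDs exist when THP is compiled out */
}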