Lines matching refs:vma — identifier cross-reference hits for vma in include/linux/hugetlb.h. Each entry gives the source line number, the matching line, and the tool's annotation (member, argument, or the enclosing function).

113 	struct vm_area_struct *vma;  member
128 void hugetlb_dup_vma_private(struct vm_area_struct *vma);
129 void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
130 int move_hugetlb_page_tables(struct vm_area_struct *vma,
136 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
143 struct vm_area_struct *vma,
150 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
161 struct vm_area_struct *vma,
175 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
202 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
244 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
246 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
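huge_pte_alloc() above walks, and if necessary allocates, the page-table levels down to the huge-page entry for a VMA, attempting huge_pmd_share() first for qualifying shared mappings. A minimal sketch of a caller on a fault path that already holds the mmap lock; the helper name is hypothetical:

	#include <linux/hugetlb.h>
	#include <linux/mm.h>

	/* Hypothetical helper: find or allocate the huge PTE slot covering
	 * @addr in @vma. Returns NULL if a page-table allocation failed. */
	static pte_t *get_fault_pte(struct mm_struct *mm,
				    struct vm_area_struct *vma,
				    unsigned long addr)
	{
		unsigned long sz = huge_page_size(hstate_vma(vma));

		/* Align down to the huge-page boundary this VMA uses. */
		return huge_pte_alloc(mm, vma, addr & ~(sz - 1), sz);
	}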
249 extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
251 extern void __hugetlb_zap_end(struct vm_area_struct *vma,
254 static inline void hugetlb_zap_begin(struct vm_area_struct *vma, in hugetlb_zap_begin() argument
257 if (is_vm_hugetlb_page(vma)) in hugetlb_zap_begin()
258 __hugetlb_zap_begin(vma, start, end); in hugetlb_zap_begin()
261 static inline void hugetlb_zap_end(struct vm_area_struct *vma, in hugetlb_zap_end() argument
264 if (is_vm_hugetlb_page(vma)) in hugetlb_zap_end()
265 __hugetlb_zap_end(vma, details); in hugetlb_zap_end()
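The inline wrappers above make zap bracketing a no-op for non-hugetlb VMAs. A sketch of the calling pattern around page-table teardown; the pointer-taking start/end signature is assumed from the kernel source, since the listing truncates it:

	#include <linux/hugetlb.h>
	#include <linux/mm.h>

	/* Sketch: bracket teardown with the zap hooks. For hugetlb VMAs,
	 * hugetlb_zap_begin() takes the VMA lock and may widen the range
	 * when PMD sharing is possible; hugetlb_zap_end() drops the lock. */
	static void zap_one_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct zap_details *details)
	{
		hugetlb_zap_begin(vma, &start, &end);
		/* ... unmap_page_range()-style teardown of [start, end) ... */
		hugetlb_zap_end(vma, details);
	}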
268 void hugetlb_vma_lock_read(struct vm_area_struct *vma);
269 void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
270 void hugetlb_vma_lock_write(struct vm_area_struct *vma);
271 void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
272 int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
273 void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
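The lock helpers above wrap the per-VMA hugetlb lock. A sketch of a read-side critical section around a lookup with hugetlb_walk() (declared further down in this listing); the helper name is hypothetical:

	#include <linux/hugetlb.h>

	/* Hypothetical helper: report whether a huge PTE slot exists for
	 * @addr, holding the hugetlb VMA lock across the walk as
	 * hugetlb_walk() requires. */
	static bool huge_pte_slot_exists(struct vm_area_struct *vma,
					 unsigned long addr)
	{
		struct hstate *h = hstate_vma(vma);
		pte_t *ptep;

		hugetlb_vma_lock_read(vma);
		ptep = hugetlb_walk(vma, addr, huge_page_size(h));
		hugetlb_vma_unlock_read(vma);

		return ptep != NULL;
	}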
278 long hugetlb_change_protection(struct vm_area_struct *vma,
283 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
287 static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma) in hugetlb_dup_vma_private() argument
291 static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma) in clear_vma_resv_huge_pages() argument
307 struct vm_area_struct *vma, in huge_pmd_unshare() argument
314 struct vm_area_struct *vma, in adjust_range_if_pmd_sharing_possible() argument
320 struct vm_area_struct *vma, in hugetlb_zap_begin() argument
326 struct vm_area_struct *vma, in hugetlb_zap_end() argument
332 struct vm_area_struct *vma, unsigned long address, unsigned int flags, in hugetlb_follow_page_mask() argument
347 static inline int move_hugetlb_page_tables(struct vm_area_struct *vma, in move_hugetlb_page_tables() argument
376 static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma) in hugetlb_vma_lock_read() argument
380 static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma) in hugetlb_vma_unlock_read() argument
384 static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma) in hugetlb_vma_lock_write() argument
388 static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma) in hugetlb_vma_unlock_write() argument
392 static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma) in hugetlb_vma_trylock_write() argument
397 static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma) in hugetlb_vma_assert_locked() argument
469 struct vm_area_struct *vma, unsigned long address, in hugetlb_change_protection() argument
477 struct vm_area_struct *vma, unsigned long start, in __unmap_hugepage_range() argument
485 struct vm_area_struct *vma, unsigned long address, in hugetlb_fault() argument
492 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } in hugetlb_unshare_all_pmds() argument
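The stubs above (the !CONFIG_HUGETLB_PAGE branch) are empty or return fixed values so that generic mm code can call the hugetlb helpers unconditionally. A sketch of the resulting #ifdef-free caller:

	#include <linux/hugetlb.h>

	/* Sketch: no #ifdef needed; with CONFIG_HUGETLB_PAGE=n this inline
	 * is empty and the compiler discards the call entirely. */
	static void drop_shared_pmds(struct vm_area_struct *vma)
	{
		hugetlb_unshare_all_pmds(vma);
	}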
747 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
751 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
755 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
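alloc_hugetlb_folio() consumes the VMA's reservation accounting when it hands back a folio, and restore_reserve_on_error() is the matching cleanup when a later step fails. A sketch of the pairing; the mapping step is a hypothetical placeholder:

	#include <linux/hugetlb.h>
	#include <linux/err.h>

	/* Hypothetical placeholder for a step that can fail, e.g. inserting
	 * the folio into the page tables. */
	static int map_step(struct folio *folio)
	{
		return 0;
	}

	/* Sketch: allocate a hugetlb folio for @addr and undo the
	 * reservation accounting if the follow-up step fails. */
	static struct folio *get_mapped_folio(struct vm_area_struct *vma,
					      unsigned long addr)
	{
		struct hstate *h = hstate_vma(vma);
		struct folio *folio;
		int err;

		folio = alloc_hugetlb_folio(vma, addr, 0);
		if (IS_ERR(folio))
			return folio;

		err = map_step(folio);
		if (err) {
			restore_reserve_on_error(h, vma, addr, folio);
			folio_put(folio);
			return ERR_PTR(err);
		}
		return folio;
	}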
803 static inline struct hstate *hstate_vma(struct vm_area_struct *vma) in hstate_vma() argument
805 return hstate_file(vma->vm_file); in hstate_vma()
813 extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
815 extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
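hstate_vma() above derives the hstate from the VMA's backing hugetlbfs file, which is also what vma_kernel_pagesize() reports for hugetlb mappings. A sketch using it to size a range; the helper name is hypothetical:

	#include <linux/hugetlb.h>

	/* Hypothetical helper: number of huge pages spanned by [start, end),
	 * assuming the range is aligned to the VMA's huge page size. */
	static unsigned long huge_pages_in_range(struct vm_area_struct *vma,
						 unsigned long start,
						 unsigned long end)
	{
		struct hstate *h = hstate_vma(vma); /* from vma->vm_file */

		return (end - start) >> huge_page_shift(h);
	}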
1006 static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, in huge_ptep_modify_prot_start() argument
1009 return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_modify_prot_start()
1015 static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, in huge_ptep_modify_prot_commit() argument
1019 unsigned long psize = huge_page_size(hstate_vma(vma)); in huge_ptep_modify_prot_commit()
1021 set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); in huge_ptep_modify_prot_commit()
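huge_ptep_modify_prot_start()/_commit() above form a two-phase protection update: start atomically clears the PTE so hardware cannot race dirty/accessed updates, commit publishes the new value; the generic fallback shown simply pairs huge_ptep_get_and_clear() with set_huge_pte_at(). A sketch of the calling pattern; the five-argument commit form (old and new PTE) is assumed from the kernel source, since the listing truncates it:

	#include <linux/hugetlb.h>

	/* Sketch: write-protect one huge PTE with the two-phase protocol. */
	static void make_huge_pte_readonly(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep)
	{
		pte_t old_pte, pte;

		old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
		pte = huge_pte_wrprotect(old_pte);
		huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
	}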
1049 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
1064 struct vm_area_struct *vma,
1085 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
1110 static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1115 static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1199 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
1253 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
1260 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) argument
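want_pmd_share() above decides whether a VMA qualifies for hugetlb PMD sharing, and flush_hugetlb_tlb_range() falls back to flush_tlb_range() unless an architecture overrides it. A sketch of branching on that test, mirroring in outline what huge_pte_alloc() does internally:

	#include <linux/hugetlb.h>
	#include <linux/mm.h>

	/* Sketch: try to share a PMD page with other mappings of the same
	 * file; fall back to a private allocation otherwise. */
	static pte_t *pmd_alloc_maybe_shared(struct mm_struct *mm,
					     struct vm_area_struct *vma,
					     unsigned long addr, pud_t *pud)
	{
		if (want_pmd_share(vma, addr))
			return huge_pmd_share(mm, vma, addr, pud);

		return (pte_t *)pmd_alloc(mm, pud, addr);
	}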
1263 static inline bool __vma_shareable_lock(struct vm_area_struct *vma) in __vma_shareable_lock() argument
1265 return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data; in __vma_shareable_lock()
1268 static inline bool __vma_private_lock(struct vm_area_struct *vma) in __vma_private_lock() argument
1270 return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data; in __vma_private_lock()
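The two predicates above disambiguate vm_private_data: for VM_MAYSHARE mappings it points to the per-VMA lock, for private mappings to reservation state. A sketch of read-locking only the shareable case, essentially what hugetlb_vma_lock_read() does; the rw_sema field of struct hugetlb_vma_lock is assumed from the kernel source:

	#include <linux/hugetlb.h>
	#include <linux/rwsem.h>

	/* Sketch: take the shared-mapping VMA lock when it exists; private
	 * mappings keep different state in vm_private_data and are skipped. */
	static void lock_if_shareable(struct vm_area_struct *vma)
	{
		if (__vma_shareable_lock(vma)) {
			struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

			down_read(&vma_lock->rw_sema);
		}
	}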
1278 hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz) in hugetlb_walk() argument
1282 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; in hugetlb_walk()
1291 if (__vma_shareable_lock(vma)) in hugetlb_walk()
1294 &vma->vm_file->f_mapping->i_mmap_rwsem)); in hugetlb_walk()
1296 return huge_pte_offset(vma->vm_mm, addr, sz); in hugetlb_walk()
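hugetlb_walk() above is huge_pte_offset() plus a lockdep assertion: the caller must hold either the hugetlb VMA lock (the __vma_shareable_lock() case) or the mapping's i_mmap_rwsem. A sketch of the i_mmap-side caller; the helper name is hypothetical, and a real caller must not use the returned slot after dropping the lock:

	#include <linux/hugetlb.h>
	#include <linux/fs.h>

	/* Hypothetical helper: look up the huge PTE slot for @addr while
	 * holding i_mmap_rwsem, satisfying hugetlb_walk()'s assertion. */
	static pte_t *walk_under_i_mmap(struct vm_area_struct *vma,
					unsigned long addr, unsigned long sz)
	{
		struct address_space *mapping = vma->vm_file->f_mapping;
		pte_t *ptep;

		i_mmap_lock_read(mapping);
		ptep = hugetlb_walk(vma, addr, sz);
		i_mmap_unlock_read(mapping);

		return ptep;
	}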