Lines Matching full:pmd
29 unsigned long addr, pmd_t *pmd,
32 pmd_t *pmd, unsigned long addr, unsigned long next);
33 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
40 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
46 * vmf_insert_pfn_pmd - insert a pmd size pfn
52 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
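The two comment lines above look like the kernel-doc for vmf_insert_pfn_pmd() in include/linux/huge_mm.h. Below is a minimal caller sketch, assuming the vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) prototype that the DAX code uses in kernels of this vintage; my_pmd_fault() and my_dev_phys_base() are hypothetical driver names, not part of the header.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical driver helper that looks up the device physical address. */
phys_addr_t my_dev_phys_base(struct vm_fault *vmf, unsigned long pmd_addr);

static vm_fault_t my_pmd_fault(struct vm_fault *vmf)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	pfn_t pfn;

	/* The whole PMD-sized range must lie inside the VMA. */
	if (pmd_addr < vmf->vma->vm_start ||
	    pmd_addr + PMD_SIZE > vmf->vma->vm_end)
		return VM_FAULT_FALLBACK;

	pfn = phys_to_pfn_t(my_dev_phys_base(vmf, pmd_addr),
			    PFN_DEV | PFN_MAP);

	/* Installs a PMD-sized mapping of pfn at the faulting address. */
	return vmf_insert_pfn_pmd(vmf, pfn, write);
}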
132 * check that the PMD-aligned addresses in the VMA map to
133 * PMD-aligned offsets within the file, else the hugepage will
134 * not be PMD-mappable.
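A small sketch of the check this comment describes, modelled on the file-backed branch of transhuge_vma_suitable() in the same header: the page offset backing a PMD-aligned virtual address must itself be aligned to HPAGE_PMD_NR pages. pmd_file_offsets_suitable() is a made-up name used only for illustration.

#include <linux/huge_mm.h>
#include <linux/mm.h>

static bool pmd_file_offsets_suitable(struct vm_area_struct *vma)
{
	/* Anonymous memory has no file offset to worry about. */
	if (vma_is_anonymous(vma))
		return true;

	/*
	 * vm_start corresponds to file page vm_pgoff; the hugepage can only
	 * be PMD-mapped if that correspondence keeps PMD-aligned addresses
	 * on HPAGE_PMD_NR-aligned file offsets.
	 */
	return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
			  HPAGE_PMD_NR);
}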
192 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
226 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
229 static inline int is_swap_pmd(pmd_t pmd) in is_swap_pmd() argument
231 return !pmd_none(pmd) && !pmd_present(pmd); in is_swap_pmd()
235 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, in pmd_trans_huge_lock() argument
238 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) in pmd_trans_huge_lock()
239 return __pmd_trans_huge_lock(pmd, vma); in pmd_trans_huge_lock()
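The inline above returns the held page-table lock only when the PMD should be treated as a single unit. A usage sketch of the lock-or-fall-back pattern its callers in mm/ follow; do_something_huge() and do_something_pte() are hypothetical placeholders.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical per-granularity handlers. */
void do_something_huge(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr);
void do_something_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr);

static void walk_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			 unsigned long addr)
{
	spinlock_t *ptl;

	/*
	 * Non-NULL means the PMD is a swap, trans-huge, or devmap entry and
	 * the lock is held; it stays stable until we drop ptl.
	 */
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		do_something_huge(vma, pmd, addr);
		spin_unlock(ptl);
		return;
	}

	/* Not huge: fall back to PTE-level handling. */
	do_something_pte(vma, pmd, addr);
}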
253 * folio_test_pmd_mappable - Can we map this folio with a PMD?
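For context, folio_test_pmd_mappable() in this header appears to simply compare the folio order against HPAGE_PMD_ORDER. A tiny usage sketch follows; account_thp() and account_small() are hypothetical helpers.

#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Hypothetical accounting helpers. */
void account_thp(struct folio *folio);
void account_small(struct folio *folio);

static void account_folio(struct folio *folio)
{
	/*
	 * True only says the folio is big enough for a PMD mapping;
	 * whether one is actually installed is up to the fault path.
	 */
	if (folio_test_pmd_mappable(folio))
		account_thp(folio);
	else
		account_small(folio);
}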
262 pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
276 static inline bool is_huge_zero_pmd(pmd_t pmd) in is_huge_zero_pmd() argument
278 return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); in is_huge_zero_pmd()
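A usage sketch for the helper above: teardown and copy paths test for the shared huge zero page before touching a per-process folio, reading the pmd under its page-table lock. handle_zero() and handle_folio() are hypothetical placeholders.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical handlers for the two cases. */
void handle_zero(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr);
void handle_folio(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr);

static void handle_one_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr)
{
	spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);

	/* The huge zero page is shared; there is no folio to unmap here. */
	if (is_huge_zero_pmd(*pmd))
		handle_zero(vma, pmd, addr);
	else
		handle_folio(vma, pmd, addr);

	spin_unlock(ptl);
}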
356 static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
383 static inline int is_swap_pmd(pmd_t pmd) in is_swap_pmd() argument
387 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, in pmd_trans_huge_lock() argument
408 static inline bool is_huge_zero_pmd(pmd_t pmd) in is_huge_zero_pmd() argument
424 unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) in follow_devmap_pmd() argument