/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEWALK_H
#define _LINUX_PAGEWALK_H

#include <linux/mm.h>

struct mm_walk;

/**
 * mm_walk_ops - callbacks for walk_page_range
 * @pgd_entry:		if set, called for each non-empty PGD (top-level) entry
 * @p4d_entry:		if set, called for each non-empty P4D entry
 * @pud_entry:		if set, called for each non-empty PUD entry
 * @pmd_entry:		if set, called for each non-empty PMD entry
 *			this handler is required to be able to handle
 *			pmd_trans_huge() pmds. It may simply choose to
 *			split_huge_page() instead of handling it explicitly.
 * @pte_entry:		if set, called for each non-empty PTE (lowest-level)
 *			entry
 * @pte_hole:		if set, called for each hole at all levels,
 *			depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD,
 *			4:PTE. Any folded depths (where PTRS_PER_P?D is equal
 *			to 1) are skipped.
 * @hugetlb_entry:	if set, called for each hugetlb entry
 * @test_walk:		caller specific callback function to determine whether
 *			we walk over the current vma or not. Returning 0 means
 *			"do page table walk over the current vma", returning
 *			a negative value means "abort current page table walk
 *			right now" and returning 1 means "skip the current vma"
 * @pre_vma:		if set, called before starting walk on a non-null vma.
 * @post_vma:		if set, called after a walk on a non-null vma, provided
 *			that @pre_vma and the vma walk succeeded.
 *
 * p?d_entry callbacks are called even if those levels are folded on a
 * particular architecture/configuration.
 */
struct mm_walk_ops {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			int depth, struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			 struct mm_walk *walk);
	int (*pre_vma)(unsigned long start, unsigned long end,
		       struct mm_walk *walk);
	void (*post_vma)(struct mm_walk *walk);
};
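
/*
 * Example (illustrative sketch only, not part of this header's API): a
 * minimal mm_walk_ops that counts present PTEs in the walked range. The
 * names count_pte_entry and count_present_ops, and the long counter
 * passed via walk->private, are hypothetical. Because no pmd_entry is
 * supplied, the core walker is expected to split transparent huge PMDs
 * before calling pte_entry when walking with a VMA.
 *
 *	static int count_pte_entry(pte_t *pte, unsigned long addr,
 *				   unsigned long next, struct mm_walk *walk)
 *	{
 *		long *nr_present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr_present)++;
 *		return 0;
 *	}
 *
 *	static const struct mm_walk_ops count_present_ops = {
 *		.pte_entry	= count_pte_entry,
 *	};
 */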

/*
 * Action for pud_entry / pmd_entry callbacks.
 * ACTION_SUBTREE is the default
 */
enum page_walk_action {
	/* Descend to next level, splitting huge pages if needed and possible */
	ACTION_SUBTREE = 0,
	/* Continue to next entry at this level (ignoring any subtree) */
	ACTION_CONTINUE = 1,
	/* Call again for this entry */
	ACTION_AGAIN = 2
};

/**
 * mm_walk - walk_page_range data
 * @ops:	operation to call during the walk
 * @mm:		mm_struct representing the target process of page table walk
 * @pgd:	pointer to PGD; only valid with no_vma (otherwise set to NULL)
 * @vma:	vma currently walked (NULL if walking outside vmas)
 * @action:	next action to perform (see enum page_walk_action)
 * @no_vma:	walk ignoring vmas (vma will always be NULL)
 * @private:	private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	const struct mm_walk_ops *ops;
	struct mm_struct *mm;
	pgd_t *pgd;
	struct vm_area_struct *vma;
	enum page_walk_action action;
	bool no_vma;
	void *private;
};

int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private);
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private);

#endif /* _LINUX_PAGEWALK_H */
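
/*
 * Example (illustrative sketch only): driving a walk over a single VMA
 * with the hypothetical count_present_ops from the sketch above. The
 * caller must hold the mm's mmap semaphore across the walk (mmap_sem
 * here; the exact locking primitive depends on kernel version).
 *
 *	long nr_present = 0;
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(mm, vma->vm_start, vma->vm_end,
 *			&count_present_ops, &nr_present);
 *	up_read(&mm->mmap_sem);
 */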