/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn using the vma's page protection. See
 * vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn using the vma's page protection. See
 * vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

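/*
 * Bit numbers within transparent_hugepage_flags; most of these are
 * toggled through the /sys/kernel/mm/transparent_hugepage/ sysfs
 * interface.
 */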
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()				\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))

/*
 * Do the below checks:
 *   - For file vma, check if the linear page offset of vma is
 *     HPAGE_PMD_NR aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, else the hugepage will
 *     not be PMD-mappable.
 *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
 *     area.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

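/*
 * Whether the VMA is eligible for file-backed THP: requires
 * CONFIG_READ_ONLY_THP_FOR_FS and an executable mapping of a regular
 * file that is not open for write.
 */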
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

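/*
 * Central policy check: may this VMA use transparent hugepages?
 * @smaps and @in_pf identify the caller (smaps reporting vs. the page
 * fault path); @enforce_sysfs selects whether the sysfs settings are
 * honoured.
 */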
bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs);

#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

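/*
 * split_huge_page_to_list() returns 0 on success; the caller must hold
 * the page lock. can_split_folio() is a cheaper pre-check that also
 * reports, via @pextra_pins, how many extra page references are
 * expected on top of the map counts.
 */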
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

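/*
 * Split a huge PMD back to PTE mappings if the entry is actually huge
 * (transparent huge or devmap) or a swap/migration entry; a no-op
 * otherwise, so callers need not check first.
 */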
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

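/* As split_huge_pmd() above, but for a huge (or devmap) PUD entry. */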
#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

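/*
 * A PMD that is neither none nor present holds a swap-style entry,
 * i.e. a migration entry under CONFIG_ARCH_ENABLE_THP_MIGRATION.
 */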
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

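/*
 * The single, refcounted huge zero page used to back read faults when
 * use_zero_page is enabled; huge_zero_pfn caches its pfn.
 */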
extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

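/*
 * READ_ONCE() below: the huge zero page can be freed and the globals
 * cleared (e.g. by its shrinker) while these helpers run.
 */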
static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

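/* There is no huge zero PUD page, so this is always false. */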
static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

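/* Build a huge, present PMD entry mapping @page with protection @prot. */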
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

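/* True when the architecture can migrate a THP without splitting it. */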
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * See organization of tail pages of compound page in
	 * "struct page" definition.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags, bool smaps,
				      bool in_pf, bool enforce_sysfs)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

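/*
 * The folio split helpers are available in both configurations: with
 * THP disabled, split_huge_page_to_list() is the stub above that
 * returns 0.
 */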
static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

static inline int split_folio(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}

/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support
 * THP_SWP due to implementation limitations (e.g. arm64 MTE) can
 * override this to false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif

#endif /* _LINUX_HUGE_MM_H */