/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot,
			   unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);
/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. The page protection is taken from the VMA's
 * vm_page_prot. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
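/*
 * Usage sketch (illustrative only, not part of this API): a driver's
 * ->huge_fault handler can map a PMD-sized device region with
 * vmf_insert_pfn_pmd(). foo_dev_phys() is a hypothetical helper; a real
 * driver derives the physical address from vmf->pgoff itself.
 *
 *	static vm_fault_t foo_huge_fault(struct vm_fault *vmf,
 *					 enum page_entry_size pe_size)
 *	{
 *		phys_addr_t phys = foo_dev_phys(vmf);
 *		pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);
 *
 *		if (pe_size != PE_SIZE_PMD)
 *			return VM_FAULT_FALLBACK;
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */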
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. The page protection is taken from the VMA's
 * vm_page_prot. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count,
					  enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf,
					 enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on VMAs which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;
	/*
	 * For dax vmas, try to always use hugepage mappings. If the kernel
	 * does not support hugepages, fsdax mappings will fall back to
	 * PAGE_SIZE mappings, and device-dax namespaces, which try to
	 * guarantee a given mapping size, will fail to enable.
	 */
	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}
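/*
 * Usage sketch (illustrative only): this is roughly how a fault path can
 * decide whether a huge allocation is worth attempting before falling back
 * to PTE-mapped pages. try_anon_huge_fault() is a hypothetical wrapper,
 * not part of this header.
 *
 *	static vm_fault_t try_anon_huge_fault(struct vm_fault *vmf)
 *	{
 *		if (pmd_none(*vmf->pmd) &&
 *		    __transparent_hugepage_enabled(vmf->vma))
 *			return do_huge_pmd_anonymous_page(vmf);
 *		return VM_FAULT_FALLBACK;
 *	}
 */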

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
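/*
 * Usage sketch (illustrative only): callers round the faulting address
 * down to a PMD boundary and bail out to the PTE path when the range is
 * not suitable, along these lines:
 *
 *	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 *
 *	if (!transhuge_vma_suitable(vmf->vma, haddr))
 *		return VM_FAULT_FALLBACK;
 */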

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
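/*
 * Usage sketch (illustrative only): split_huge_page() must be called with
 * the page locked, and it can fail (e.g. when extra pins are held), so
 * callers typically keep a whole-THP fallback:
 *
 *	page = compound_head(page);
 *	if (trylock_page(page)) {
 *		if (!split_huge_page(page)) {
 *			// success: page is now a base page
 *		}
 *		unlock_page(page);
 *	}
 */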
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
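/*
 * Usage sketch (illustrative only): range walkers that do not want to
 * handle a huge PMD can split it in place and continue at the PTE level.
 * The macro itself checks whether the PMD is huge, so it is safe to call
 * unconditionally:
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	// the PMD now points to a PTE table; continue with a PTE walk
 */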

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
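/*
 * Usage sketch (illustrative only): the returned pointer doubles as a
 * "was it huge?" predicate. On success the PMD lock is held and must be
 * dropped by the caller; on NULL, fall through to the PTE level:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		// operate on the huge PMD while holding its lock
 *		spin_unlock(ptl);
 *		return 0;
 *	}
 *	// not huge (or already split): continue with the PTE table
 */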
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
	return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_ORDER;
	return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}
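/*
 * Usage sketch (illustrative only): thp_nr_pages() lets code that works
 * on base pages iterate the subpages of a possibly-huge page uniformly:
 *
 *	int i, nr = thp_nr_pages(page);
 *
 *	for (i = 0; i < nr; i++)
 *		clear_highpage(page + i);
 */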

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
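/*
 * Usage sketch (illustrative only): the huge zero page is refcounted
 * per-mm; a successful get is balanced by a put (mm teardown drops the
 * mm's reference via mm_put_huge_zero_page()):
 *
 *	struct page *zero_page = mm_get_huge_zero_page(mm);
 *
 *	if (!zero_page)
 *		return VM_FAULT_FALLBACK;	// allocation failed
 *	// map zero_page read-only into the faulting PMD
 */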

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global or memcg deferred list lives in the second tail
	 * page; the first word of that page is occupied by the
	 * compound_head overlay.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return page;
}

static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 0;
}

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
			unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}
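/*
 * Usage sketch (illustrative only): thp_size() is handy wherever byte
 * granularity is needed for a possibly-huge page, e.g. when clamping how
 * far an operation may extend:
 *
 *	unsigned long bytes = thp_size(page);	// PAGE_SIZE or HPAGE_PMD_SIZE
 *
 *	end = min(end, addr + bytes);
 */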

#endif /* _LINUX_HUGE_MM_H */