// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

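/*
 * hugepage_vma_check - check whether a VMA may use transparent hugepages
 * @vma: the VMA to check
 * @vm_flags: the VM flags to test (callers may pass a modified copy)
 * @smaps: whether we are answering for /proc/<pid>/smaps THPeligible
 * @in_pf: whether we are answering from the page fault path
 * @enforce_sysfs: whether the sysfs "enabled" setting must be honoured
 *
 * Returns true if THP is allowed for this mapping under the given context.
 */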
bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * Special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags);

	/* Enforce sysfs THP requirements as necessary */
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;

	/* Only regular file is valid */
	if (!in_pf && file_thp_enabled(vma))
		return true;

	if (!vma_is_anonymous(vma))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}

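/*
 * The huge zero page is a PMD-sized page of zeroes shared by all qualifying
 * read faults. It is allocated lazily, tracked per mm via the
 * MMF_HUGE_ZERO_PAGE bit, and freed by a shrinker once only the extra
 * reference taken at allocation time remains.
 */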
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take an additional reference here. It will be put back by the shrinker. */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker can
	 * put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free the zero page only if the last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
	if (err)
		goto err_split_shrinker;

	/*
	 * By default, disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif

void folio_prep_large_rmappable(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	INIT_LIST_HEAD(&folio->_deferred_list);
	folio_set_large_rmappable(folio);
}

static inline bool is_transparent_hugepage(struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_page(&folio->page) ||
	       folio_test_large_rmappable(folio);
}

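/*
 * Look for a THP-alignable unmapped area: pad the requested length by @size
 * and, if the padded search succeeds, shift the result so that the mapping
 * start is congruent to the file offset modulo @size. Returns 0 when no
 * such area is available, in which case the caller falls back to an
 * unpadded search.
 */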
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

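/*
 * Map a freshly allocated THP at the faulting address: charge the folio to
 * the memcg, pre-allocate a PTE page table so the PMD can be split later
 * without allocating, zero the huge page, and install the PMD under the
 * page table lock. A VMA registered for missing-mode userfaultfd gets the
 * fault delivered to userspace instead.
 */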
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(page);
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	folio_throttle_swaprate(folio, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr);
		folio_add_lru_vma(folio, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
	    !mm_forbids_zeropage(vma->vm_mm) &&
	    transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t prot = vma->vm_page_prot;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

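/*
 * Mark an existing huge PMD young (and dirty for writes) on behalf of
 * FOLL_TOUCH, mirroring the bits a hardware access would have set.
 */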
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

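/*
 * Copy one huge PMD from parent to child at fork. The source PMD is
 * write-protected so both sides share the THP (or huge zero page) until a
 * write fault, and a PTE table is deposited for a later split. If the page
 * may be pinned, the source PMD is split and -EAGAIN tells the caller to
 * retry the copy at PTE granularity.
 */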
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if it can be refilled on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * While the page table lock is held, the huge zero pmd should not be
	 * under splitting, since we don't split the page itself, only the
	 * pmd to a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		mm_get_huge_zero_page(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);

	get_page(src_page);
	if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
		/* Page may be pinned: split and retry the fault on PTEs. */
		put_page(src_page);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		      pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);

	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * While the page table lock is held, the huge zero pud should not be
	 * under splitting, since we don't split the page itself, only the
	 * pud to a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/*
	 * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
	 * and split if duplicating fails.
	 */
	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);

unlock:
	spin_unlock(vmf->ptl);
}

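/*
 * Handle a write (or unshare) fault against a mapped THP. The folio is
 * reused in place when this mapping holds the only reference; otherwise
 * the PMD is split and VM_FAULT_FALLBACK lets the fault be retried at PTE
 * level, where the regular copy-on-write path takes over.
 */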
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	folio = page_folio(page);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!folio_trylock(folio)) {
		folio_get(folio);
		spin_unlock(vmf->ptl);
		folio_lock(folio);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			folio_unlock(folio);
			folio_put(folio);
			return 0;
		}
		folio_put(folio);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		folio_unlock(folio);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the folio exclusively if
	 * there are no additional references. Note that we always drain
	 * the LRU cache immediately after adding a THP.
	 */
	if (folio_ref_count(folio) >
			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
		goto unlock_fallback;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_ref_count(folio) == 1) {
		pmd_t entry;

		page_move_anon_rmap(page, vma);
		folio_unlock(folio);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return 0;
	}

unlock_fallback:
	folio_unlock(folio);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}

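/*
 * Decide whether a read-only huge PMD may be upgraded to writable without a
 * write fault: soft-dirty and uffd-wp tracking still require the fault, and
 * private mappings may only skip it for exclusive anonymous pages.
 */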
static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable (NUMA hinting). */
	if (pmd_protnone(pmd))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_huge_pmd_wp(vma, pmd))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/* See can_change_pte_writable(). */
		page = vm_normal_page_pmd(vma, addr, pmd);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/* See can_change_pte_writable(). */
	return pmd_dirty(pmd);
}

/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);

	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(*pmd, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
		return NULL;

	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
			!PageAnonExclusive(page), page);

	ret = try_grab_page(page, flags);
	if (ret)
		return ERR_PTR(ret);

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);

	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pmd_t oldpmd = vmf->orig_pmd;
	pmd_t pmd;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int page_nid = NUMA_NO_NODE;
	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
	bool migrated = false, writable = false;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
		spin_unlock(vmf->ptl);
		goto out;
	}

	pmd = pmd_modify(oldpmd, vma->vm_page_prot);

	/*
	 * Detect now whether the PMD could be writable; this information
	 * is only valid while holding the PT lock.
	 */
	writable = pmd_write(pmd);
	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
	    can_change_pmd_writable(vma, vmf->address, pmd))
		writable = true;

	page = vm_normal_page_pmd(vma, haddr, pmd);
	if (!page)
		goto out_map;

	/* See similar comment in do_numa_page for explanation */
	if (!writable)
		flags |= TNF_NO_GROUP;

	page_nid = page_to_nid(page);
	/*
	 * For memory tiering mode, the cpupid of a slow memory page is used
	 * to record the page's access time, so use the default value.
	 */
	if (node_is_toptier(page_nid))
		last_cpupid = page_cpupid_last(page);
	target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
				       &flags);

	if (target_nid == NUMA_NO_NODE) {
		put_page(page);
		goto out_map;
	}

	spin_unlock(vmf->ptl);
	writable = false;

	migrated = migrate_misplaced_page(page, vma, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	} else {
		flags |= TNF_MIGRATE_FAIL;
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
			spin_unlock(vmf->ptl);
			goto out;
		}
		goto out_map;
	}

out:
	if (page_nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
				flags);

	return 0;

out_map:
	/* Restore the PMD */
	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (writable)
		pmd = pmd_mkwrite(pmd, vma);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);
	goto out;
}

/*
 * Return true if we do MADV_FREE successfully on the entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct folio *folio;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	folio = pfn_folio(pmd_pfn(orig_pmd));
	/*
	 * If other processes are mapping this folio, we can't discard
	 * the folio unless they all do MADV_FREE, so skip the folio.
	 */
	if (folio_estimated_sharers(folio) != 1)
		goto out;

	if (!folio_trylock(folio))
		goto out;

	/*
	 * If the user wants to discard only part of the THP's pages, split
	 * it so MADV_FREE will deactivate only those parts.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		folio_get(folio);
		spin_unlock(ptl);
		split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);
		goto out_unlocked;
	}

	if (folio_test_dirty(folio))
		folio_clear_dirty(folio);
	folio_unlock(folio);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	folio_mark_lazyfree(folio);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}

static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}

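/*
 * Tear down one huge PMD during unmap: clear the entry, drop the rmap and
 * memory counters for the mapped page (or migration entry), and free any
 * deposited PTE table. Returns 1 if a huge PMD was zapped, 0 if the entry
 * was not huge and must be zapped at PTE granularity instead.
 */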
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at the deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
						tlb->fullmm);
	arch_check_zapped_pmd(vma, orig_pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_special_huge(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, vma, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_swap_entry_to_page(entry);
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd locks we also need to move the preallocated
	 * PTE page table if new_pmd is on a different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif

static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}

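/*
 * Move a huge PMD for mremap(). The destination PMD must be empty; the
 * source entry is cleared, soft-dirty state is carried over, and the
 * deposited PTE table follows the entry when the PMD lock changes.
 * Returns false when the source was not a huge PMD, so the caller moves
 * the mapping at PTE granularity instead.
 */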
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it; but move_page_tables() might have already
	 * inserted a page table, if racing against shmem/file collapse.
	 */
	if (!pmd_none(*new_pmd)) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (force_flush)
			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}
1796
1797 /*
1798 * Returns
1799 * - 0 if PMD could not be locked
1800 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1801 * or if prot_numa but THP migration is not supported
1802 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1803 */
1804 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1805 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
1806 unsigned long cp_flags)
1807 {
1808 struct mm_struct *mm = vma->vm_mm;
1809 spinlock_t *ptl;
1810 pmd_t oldpmd, entry;
1811 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
1812 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
1813 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
1814 int ret = 1;
1815
1816 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1817
1818 if (prot_numa && !thp_migration_supported())
1819 return 1;
1820
1821 ptl = __pmd_trans_huge_lock(pmd, vma);
1822 if (!ptl)
1823 return 0;
1824
1825 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1826 if (is_swap_pmd(*pmd)) {
1827 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1828 struct page *page = pfn_swap_entry_to_page(entry);
1829 pmd_t newpmd;
1830
1831 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1832 if (is_writable_migration_entry(entry)) {
1833 /*
1834 * A protection check is difficult so
1835 * just be safe and disable write
1836 */
1837 if (PageAnon(page))
1838 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
1839 else
1840 entry = make_readable_migration_entry(swp_offset(entry));
1841 newpmd = swp_entry_to_pmd(entry);
1842 if (pmd_swp_soft_dirty(*pmd))
1843 newpmd = pmd_swp_mksoft_dirty(newpmd);
1844 } else {
1845 newpmd = *pmd;
1846 }
1847
1848 if (uffd_wp)
1849 newpmd = pmd_swp_mkuffd_wp(newpmd);
1850 else if (uffd_wp_resolve)
1851 newpmd = pmd_swp_clear_uffd_wp(newpmd);
1852 if (!pmd_same(*pmd, newpmd))
1853 set_pmd_at(mm, addr, pmd, newpmd);
1854 goto unlock;
1855 }
1856 #endif
1857
1858 if (prot_numa) {
1859 struct page *page;
1860 bool toptier;
1861 /*
1862 * Avoid trapping faults against the zero page. The read-only
1863 * data is likely to be read-cached on the local CPU and
1864 * local/remote hits to the zero page are not interesting.
1865 */
1866 if (is_huge_zero_pmd(*pmd))
1867 goto unlock;
1868
1869 if (pmd_protnone(*pmd))
1870 goto unlock;
1871
1872 page = pmd_page(*pmd);
1873 toptier = node_is_toptier(page_to_nid(page));
1874 /*
1875 * Skip scanning top tier node if normal numa
1876 * balancing is disabled
1877 */
1878 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
1879 toptier)
1880 goto unlock;
1881
1882 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
1883 !toptier)
1884 xchg_page_access_time(page, jiffies_to_msecs(jiffies));
1885 }
1886 /*
1887 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
1888 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1889 * which is also under mmap_read_lock(mm):
1890 *
1891 * CPU0: CPU1:
1892 * change_huge_pmd(prot_numa=1)
1893 * pmdp_huge_get_and_clear_notify()
1894 * madvise_dontneed()
1895 * zap_pmd_range()
1896 * pmd_trans_huge(*pmd) == 0 (without ptl)
1897 * // skip the pmd
1898 * set_pmd_at();
1899 * // pmd is re-established
1900 *
1901 * The race makes MADV_DONTNEED miss the huge pmd and fail to clear it,
1902 * which may break userspace.
1903 *
1904 * pmdp_invalidate_ad() is required to make sure we don't miss
1905 * dirty/young flags set by hardware.
1906 */
1907 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
1908
1909 entry = pmd_modify(oldpmd, newprot);
1910 if (uffd_wp)
1911 entry = pmd_mkuffd_wp(entry);
1912 else if (uffd_wp_resolve)
1913 /*
1914 * Leave the write bit to be handled by PF interrupt
1915 * handler, then things like COW could be properly
1916 * handled.
1917 */
1918 entry = pmd_clear_uffd_wp(entry);
1919
1920 /* See change_pte_range(). */
1921 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
1922 can_change_pmd_writable(vma, addr, entry))
1923 entry = pmd_mkwrite(entry, vma);
1924
1925 ret = HPAGE_PMD_NR;
1926 set_pmd_at(mm, addr, pmd, entry);
1927
1928 if (huge_pmd_needs_flush(oldpmd, entry))
1929 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
1930 unlock:
1931 spin_unlock(ptl);
1932 return ret;
1933 }
1934
1935 /*
1936 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1937 *
1938 * Note that if it returns page table lock pointer, this routine returns without
1939 * unlocking page table lock. So callers must unlock it.
1940 */
1941 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1942 {
1943 spinlock_t *ptl;
1944 ptl = pmd_lock(vma->vm_mm, pmd);
1945 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1946 pmd_devmap(*pmd)))
1947 return ptl;
1948 spin_unlock(ptl);
1949 return NULL;
1950 }
1951
1952 /*
1953 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
1954 *
1955 * Note that if it returns page table lock pointer, this routine returns without
1956 * unlocking page table lock. So callers must unlock it.
1957 */
1958 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1959 {
1960 spinlock_t *ptl;
1961
1962 ptl = pud_lock(vma->vm_mm, pud);
1963 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1964 return ptl;
1965 spin_unlock(ptl);
1966 return NULL;
1967 }
1968
1969 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1970 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1971 pud_t *pud, unsigned long addr)
1972 {
1973 spinlock_t *ptl;
1974
1975 ptl = __pud_trans_huge_lock(pud, vma);
1976 if (!ptl)
1977 return 0;
1978
1979 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
1980 tlb_remove_pud_tlb_entry(tlb, pud, addr);
1981 if (vma_is_special_huge(vma)) {
1982 spin_unlock(ptl);
1983 /* No zero page support yet */
1984 } else {
1985 /* No support for anonymous PUD pages yet */
1986 BUG();
1987 }
1988 return 1;
1989 }
1990
1991 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
1992 unsigned long haddr)
1993 {
1994 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
1995 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
1996 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
1997 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
1998
1999 count_vm_event(THP_SPLIT_PUD);
2000
2001 pudp_huge_clear_flush(vma, haddr, pud);
2002 }
2003
2004 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2005 unsigned long address)
2006 {
2007 spinlock_t *ptl;
2008 struct mmu_notifier_range range;
2009
2010 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2011 address & HPAGE_PUD_MASK,
2012 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2013 mmu_notifier_invalidate_range_start(&range);
2014 ptl = pud_lock(vma->vm_mm, pud);
2015 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2016 goto out;
2017 __split_huge_pud_locked(vma, pud, range.start);
2018
2019 out:
2020 spin_unlock(ptl);
2021 mmu_notifier_invalidate_range_end(&range);
2022 }
2023 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2024
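/*
 * Split a pmd mapping the huge zero page: fill the deposited page table
 * with ptes pointing at the (small) zero page and install it in place of
 * the huge pmd.
 */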
2025 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2026 unsigned long haddr, pmd_t *pmd)
2027 {
2028 struct mm_struct *mm = vma->vm_mm;
2029 pgtable_t pgtable;
2030 pmd_t _pmd, old_pmd;
2031 unsigned long addr;
2032 pte_t *pte;
2033 int i;
2034
2035 /*
2036 * Leave the pmd empty until the ptes are filled. Note that it is fine
2037 * to delay notification until mmu_notifier_invalidate_range_end() as we
2038 * are replacing a write-protected zero pmd page with write-protected
2039 * zero pte pages.
2040 *
2041 * See Documentation/mm/mmu_notifier.rst
2042 */
2043 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2044
2045 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2046 pmd_populate(mm, &_pmd, pgtable);
2047
2048 pte = pte_offset_map(&_pmd, haddr);
2049 VM_BUG_ON(!pte);
2050 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2051 pte_t entry;
2052
2053 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2054 entry = pte_mkspecial(entry);
2055 if (pmd_uffd_wp(old_pmd))
2056 entry = pte_mkuffd_wp(entry);
2057 VM_BUG_ON(!pte_none(ptep_get(pte)));
2058 set_pte_at(mm, addr, pte, entry);
2059 pte++;
2060 }
2061 pte_unmap(pte - 1);
2062 smp_wmb(); /* make pte visible before pmd */
2063 pmd_populate(mm, pmd, pgtable);
2064 }
2065
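/*
 * Split one huge pmd with the page table lock held. File-backed pmds are
 * simply zapped; the huge zero page and anonymous pmds are replaced by a
 * page table mapping the subpages with ptes, or with migration entries
 * when @freeze is set or the pmd already was a migration entry.
 */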
2066 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2067 unsigned long haddr, bool freeze)
2068 {
2069 struct mm_struct *mm = vma->vm_mm;
2070 struct page *page;
2071 pgtable_t pgtable;
2072 pmd_t old_pmd, _pmd;
2073 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2074 bool anon_exclusive = false, dirty = false;
2075 unsigned long addr;
2076 pte_t *pte;
2077 int i;
2078
2079 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2080 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2081 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2082 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2083 && !pmd_devmap(*pmd));
2084
2085 count_vm_event(THP_SPLIT_PMD);
2086
2087 if (!vma_is_anonymous(vma)) {
2088 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2089 /*
2090 * We are going to unmap this huge page. So
2091 * just go ahead and zap it
2092 */
2093 if (arch_needs_pgtable_deposit())
2094 zap_deposited_table(mm, pmd);
2095 if (vma_is_special_huge(vma))
2096 return;
2097 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2098 swp_entry_t entry;
2099
2100 entry = pmd_to_swp_entry(old_pmd);
2101 page = pfn_swap_entry_to_page(entry);
2102 } else {
2103 page = pmd_page(old_pmd);
2104 if (!PageDirty(page) && pmd_dirty(old_pmd))
2105 set_page_dirty(page);
2106 if (!PageReferenced(page) && pmd_young(old_pmd))
2107 SetPageReferenced(page);
2108 page_remove_rmap(page, vma, true);
2109 put_page(page);
2110 }
2111 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2112 return;
2113 }
2114
2115 if (is_huge_zero_pmd(*pmd)) {
2116 /*
2117 * FIXME: Do we want to invalidate secondary mmu by calling
2118 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
2119 * inside __split_huge_pmd() ?
2120 *
2121 * We are going from a write-protected huge zero page to
2122 * write-protected small zero pages, so it does not seem useful
2123 * to invalidate the secondary mmu at this time.
2124 */
2125 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2126 }
2127
2128 /*
2129 * Up to this point the pmd is present and huge and userland has the
2130 * whole access to the hugepage during the split (which happens in
2131 * place). If we overwrite the pmd with the not-huge version pointing
2132 * to the pte here (which of course we could if all CPUs were bug
2133 * free), userland could trigger a small page size TLB miss on the
2134 * small sized TLB while the hugepage TLB entry is still established in
2135 * the huge TLB. Some CPUs don't like that.
2136 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2137 * 383 on page 105. Intel should be safe but also warns that it's
2138 * only safe if the permission and cache attributes of the two entries
2139 * loaded in the two TLBs are identical (which should be the case here).
2140 * But it is generally safer to never allow small and huge TLB entries
2141 * for the same virtual address to be loaded simultaneously. So instead
2142 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2143 * current pmd notpresent (atomically because here the pmd_trans_huge
2144 * must remain set at all times on the pmd until the split is complete
2145 * for this pmd), then we flush the SMP TLB and finally we write the
2146 * non-huge version of the pmd entry with pmd_populate.
2147 */
2148 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2149
2150 pmd_migration = is_pmd_migration_entry(old_pmd);
2151 if (unlikely(pmd_migration)) {
2152 swp_entry_t entry;
2153
2154 entry = pmd_to_swp_entry(old_pmd);
2155 page = pfn_swap_entry_to_page(entry);
2156 write = is_writable_migration_entry(entry);
2157 if (PageAnon(page))
2158 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2159 young = is_migration_entry_young(entry);
2160 dirty = is_migration_entry_dirty(entry);
2161 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2162 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2163 } else {
2164 page = pmd_page(old_pmd);
2165 if (pmd_dirty(old_pmd)) {
2166 dirty = true;
2167 SetPageDirty(page);
2168 }
2169 write = pmd_write(old_pmd);
2170 young = pmd_young(old_pmd);
2171 soft_dirty = pmd_soft_dirty(old_pmd);
2172 uffd_wp = pmd_uffd_wp(old_pmd);
2173
2174 VM_BUG_ON_PAGE(!page_count(page), page);
2175
2176 /*
2177 * Without "freeze", we'll simply split the PMD, propagating the
2178 * PageAnonExclusive() flag for each PTE by setting it for
2179 * each subpage -- no need to (temporarily) clear.
2180 *
2181 * With "freeze" we want to replace mapped pages by
2182 * migration entries right away. This is only possible if we
2183 * managed to clear PageAnonExclusive() -- see
2184 * set_pmd_migration_entry().
2185 *
2186 * In case we cannot clear PageAnonExclusive(), split the PMD
2187 * only and let try_to_migrate_one() fail later.
2188 *
2189 * See page_try_share_anon_rmap(): invalidate PMD first.
2190 */
2191 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
2192 if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2193 freeze = false;
2194 if (!freeze)
2195 page_ref_add(page, HPAGE_PMD_NR - 1);
2196 }
2197
2198 /*
2199 * Withdraw the table only after we mark the pmd entry invalid.
2200 * This is critical for some architectures (Power).
2201 */
2202 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2203 pmd_populate(mm, &_pmd, pgtable);
2204
2205 pte = pte_offset_map(&_pmd, haddr);
2206 VM_BUG_ON(!pte);
2207 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2208 pte_t entry;
2209 /*
2210 * Note that NUMA hinting access restrictions are not
2211 * transferred to avoid any possibility of altering
2212 * permissions across VMAs.
2213 */
2214 if (freeze || pmd_migration) {
2215 swp_entry_t swp_entry;
2216 if (write)
2217 swp_entry = make_writable_migration_entry(
2218 page_to_pfn(page + i));
2219 else if (anon_exclusive)
2220 swp_entry = make_readable_exclusive_migration_entry(
2221 page_to_pfn(page + i));
2222 else
2223 swp_entry = make_readable_migration_entry(
2224 page_to_pfn(page + i));
2225 if (young)
2226 swp_entry = make_migration_entry_young(swp_entry);
2227 if (dirty)
2228 swp_entry = make_migration_entry_dirty(swp_entry);
2229 entry = swp_entry_to_pte(swp_entry);
2230 if (soft_dirty)
2231 entry = pte_swp_mksoft_dirty(entry);
2232 if (uffd_wp)
2233 entry = pte_swp_mkuffd_wp(entry);
2234 } else {
2235 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2236 if (write)
2237 entry = pte_mkwrite(entry, vma);
2238 if (anon_exclusive)
2239 SetPageAnonExclusive(page + i);
2240 if (!young)
2241 entry = pte_mkold(entry);
2242 /* NOTE: this may set soft-dirty too on some archs */
2243 if (dirty)
2244 entry = pte_mkdirty(entry);
2245 if (soft_dirty)
2246 entry = pte_mksoft_dirty(entry);
2247 if (uffd_wp)
2248 entry = pte_mkuffd_wp(entry);
2249 page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
2250 }
2251 VM_BUG_ON(!pte_none(ptep_get(pte)));
2252 set_pte_at(mm, addr, pte, entry);
2253 pte++;
2254 }
2255 pte_unmap(pte - 1);
2256
2257 if (!pmd_migration)
2258 page_remove_rmap(page, vma, true);
2259 if (freeze)
2260 put_page(page);
2261
2262 smp_wmb(); /* make pte visible before pmd */
2263 pmd_populate(mm, pmd, pgtable);
2264 }
2265
2266 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2267 unsigned long address, bool freeze, struct folio *folio)
2268 {
2269 spinlock_t *ptl;
2270 struct mmu_notifier_range range;
2271
2272 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2273 address & HPAGE_PMD_MASK,
2274 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2275 mmu_notifier_invalidate_range_start(&range);
2276 ptl = pmd_lock(vma->vm_mm, pmd);
2277
2278 /*
2279 * If the caller asks to set up a migration entry, we need a folio to
2280 * check the pmd against. Otherwise we can end up replacing the wrong folio.
2281 */
2282 VM_BUG_ON(freeze && !folio);
2283 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2284
2285 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2286 is_pmd_migration_entry(*pmd)) {
2287 /*
2288 * It's safe to call pmd_page when folio is set because it's
2289 * guaranteed that pmd is present.
2290 */
2291 if (folio && folio != page_folio(pmd_page(*pmd)))
2292 goto out;
2293 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2294 }
2295
2296 out:
2297 spin_unlock(ptl);
2298 mmu_notifier_invalidate_range_end(&range);
2299 }
2300
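/* Split the huge pmd covering @address, if one is mapped there. */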
2301 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2302 bool freeze, struct folio *folio)
2303 {
2304 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2305
2306 if (!pmd)
2307 return;
2308
2309 __split_huge_pmd(vma, pmd, address, freeze, folio);
2310 }
2311
2312 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2313 {
2314 /*
2315 * If the new address isn't hpage aligned and it could previously
2316 * contain a hugepage: check if we need to split a huge pmd.
2317 */
2318 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2319 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2320 ALIGN(address, HPAGE_PMD_SIZE)))
2321 split_huge_pmd_address(vma, address, false, NULL);
2322 }
2323
2324 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2325 unsigned long start,
2326 unsigned long end,
2327 long adjust_next)
2328 {
2329 /* Check if we need to split start first. */
2330 split_huge_pmd_if_needed(vma, start);
2331
2332 /* Check if we need to split end next. */
2333 split_huge_pmd_if_needed(vma, end);
2334
2335 /*
2336 * If we're also updating the next vma vm_start,
2337 * check if we need to split it.
2338 */
2339 if (adjust_next > 0) {
2340 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2341 unsigned long nstart = next->vm_start;
2342 nstart += adjust_next;
2343 split_huge_pmd_if_needed(next, nstart);
2344 }
2345 }
2346
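/*
 * Unmap the folio everywhere it is mapped so that the split code can
 * freeze its refcount: anon folios are replaced by migration entries,
 * file folios are simply unmapped.
 */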
2347 static void unmap_folio(struct folio *folio)
2348 {
2349 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2350 TTU_SYNC;
2351
2352 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2353
2354 /*
2355 * Anon pages need migration entries to preserve them, but file
2356 * pages can simply be left unmapped, then faulted back on demand.
2357 * If that is ever changed (perhaps for mlock), update remap_page().
2358 */
2359 if (folio_test_anon(folio))
2360 try_to_migrate(folio, ttu_flags);
2361 else
2362 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2363 }
2364
2365 static void remap_page(struct folio *folio, unsigned long nr)
2366 {
2367 int i = 0;
2368
2369 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
2370 if (!folio_test_anon(folio))
2371 return;
2372 for (;;) {
2373 remove_migration_ptes(folio, folio, true);
2374 i += folio_nr_pages(folio);
2375 if (i >= nr)
2376 break;
2377 folio = folio_next(folio);
2378 }
2379 }
2380
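/*
 * Put a freshly split tail page on the LRU next to its former head page,
 * or on the caller's @list when page reclaim is doing the split.
 */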
2381 static void lru_add_page_tail(struct page *head, struct page *tail,
2382 struct lruvec *lruvec, struct list_head *list)
2383 {
2384 VM_BUG_ON_PAGE(!PageHead(head), head);
2385 VM_BUG_ON_PAGE(PageCompound(tail), head);
2386 VM_BUG_ON_PAGE(PageLRU(tail), head);
2387 lockdep_assert_held(&lruvec->lru_lock);
2388
2389 if (list) {
2390 /* page reclaim is reclaiming a huge page */
2391 VM_WARN_ON(PageLRU(head));
2392 get_page(tail);
2393 list_add_tail(&tail->lru, list);
2394 } else {
2395 /* head is still on lru (and we have it frozen) */
2396 VM_WARN_ON(!PageLRU(head));
2397 if (PageUnevictable(tail))
2398 tail->mlock_count = 0;
2399 else
2400 list_add_tail(&tail->lru, &head->lru);
2401 SetPageLRU(tail);
2402 }
2403 }
2404
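/*
 * Turn one tail page of a THP into an independent page: clone the
 * relevant page flags from the head, fix up ->mapping and ->index, clear
 * PageTail and finally unfreeze the tail's refcount.
 */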
2405 static void __split_huge_page_tail(struct folio *folio, int tail,
2406 struct lruvec *lruvec, struct list_head *list)
2407 {
2408 struct page *head = &folio->page;
2409 struct page *page_tail = head + tail;
2410 /*
2411 * Careful: new_folio is not a "real" folio until we have cleared PageTail.
2412 * Don't pass it around before clear_compound_head().
2413 */
2414 struct folio *new_folio = (struct folio *)page_tail;
2415
2416 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2417
2418 /*
2419 * Clone page flags before unfreezing refcount.
2420 *
2421 * After successful get_page_unless_zero() might follow flags change,
2422 * for example lock_page() which set PG_waiters.
2423 *
2424 * Note that for mapped sub-pages of an anonymous THP,
2425 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
2426 * the migration entry instead from where remap_page() will restore it.
2427 * We can still have PG_anon_exclusive set on effectively unmapped and
2428 * unreferenced sub-pages of an anonymous THP: we can simply drop
2429 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2430 */
2431 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2432 page_tail->flags |= (head->flags &
2433 ((1L << PG_referenced) |
2434 (1L << PG_swapbacked) |
2435 (1L << PG_swapcache) |
2436 (1L << PG_mlocked) |
2437 (1L << PG_uptodate) |
2438 (1L << PG_active) |
2439 (1L << PG_workingset) |
2440 (1L << PG_locked) |
2441 (1L << PG_unevictable) |
2442 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2443 (1L << PG_arch_2) |
2444 (1L << PG_arch_3) |
2445 #endif
2446 (1L << PG_dirty) |
2447 LRU_GEN_MASK | LRU_REFS_MASK));
2448
2449 /* ->mapping in first and second tail page is replaced by other uses */
2450 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2451 page_tail);
2452 page_tail->mapping = head->mapping;
2453 page_tail->index = head->index + tail;
2454
2455 /*
2456 * page->private should not be set in tail pages. Fix up and warn once
2457 * if private is unexpectedly set.
2458 */
2459 if (unlikely(page_tail->private)) {
2460 VM_WARN_ON_ONCE_PAGE(true, page_tail);
2461 page_tail->private = 0;
2462 }
2463 if (folio_test_swapcache(folio))
2464 new_folio->swap.val = folio->swap.val + tail;
2465
2466 /* Page flags must be visible before we make the page non-compound. */
2467 smp_wmb();
2468
2469 /*
2470 * Clear PageTail before unfreezing page refcount.
2471 *
2472 * After successful get_page_unless_zero() might follow put_page()
2473 * which needs correct compound_head().
2474 */
2475 clear_compound_head(page_tail);
2476
2477 /* Finally unfreeze refcount. Additional reference from page cache. */
2478 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2479 PageSwapCache(head)));
2480
2481 if (page_is_young(head))
2482 set_page_young(page_tail);
2483 if (page_is_idle(head))
2484 set_page_idle(page_tail);
2485
2486 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2487
2488 /*
2489 * Always add to the tail, because some iterators expect new
2490 * pages to show up after the currently processed elements - e.g.
2491 * migrate_pages().
2492 */
2493 lru_add_page_tail(head, page_tail, lruvec, list);
2494 }
2495
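/*
 * Second half of the split, entered with the folio refcount frozen and
 * irqs disabled: convert every tail page, drop subpages beyond @end from
 * the page cache, then remap or release the resulting pages.
 */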
2496 static void __split_huge_page(struct page *page, struct list_head *list,
2497 pgoff_t end)
2498 {
2499 struct folio *folio = page_folio(page);
2500 struct page *head = &folio->page;
2501 struct lruvec *lruvec;
2502 struct address_space *swap_cache = NULL;
2503 unsigned long offset = 0;
2504 unsigned int nr = thp_nr_pages(head);
2505 int i, nr_dropped = 0;
2506
2507 /* complete memcg works before add pages to LRU */
2508 split_page_memcg(head, nr);
2509
2510 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2511 offset = swp_offset(folio->swap);
2512 swap_cache = swap_address_space(folio->swap);
2513 xa_lock(&swap_cache->i_pages);
2514 }
2515
2516 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2517 lruvec = folio_lruvec_lock(folio);
2518
2519 ClearPageHasHWPoisoned(head);
2520
2521 for (i = nr - 1; i >= 1; i--) {
2522 __split_huge_page_tail(folio, i, lruvec, list);
2523 /* Some pages can be beyond EOF: drop them from page cache */
2524 if (head[i].index >= end) {
2525 struct folio *tail = page_folio(head + i);
2526
2527 if (shmem_mapping(head->mapping))
2528 nr_dropped++;
2529 else if (folio_test_clear_dirty(tail))
2530 folio_account_cleaned(tail,
2531 inode_to_wb(folio->mapping->host));
2532 __filemap_remove_folio(tail, NULL);
2533 folio_put(tail);
2534 } else if (!PageAnon(page)) {
2535 __xa_store(&head->mapping->i_pages, head[i].index,
2536 head + i, 0);
2537 } else if (swap_cache) {
2538 __xa_store(&swap_cache->i_pages, offset + i,
2539 head + i, 0);
2540 }
2541 }
2542
2543 ClearPageCompound(head);
2544 unlock_page_lruvec(lruvec);
2545 /* Caller disabled irqs, so they are still disabled here */
2546
2547 split_page_owner(head, nr);
2548
2549 /* See comment in __split_huge_page_tail() */
2550 if (PageAnon(head)) {
2551 /* Additional pin to swap cache */
2552 if (PageSwapCache(head)) {
2553 page_ref_add(head, 2);
2554 xa_unlock(&swap_cache->i_pages);
2555 } else {
2556 page_ref_inc(head);
2557 }
2558 } else {
2559 /* Additional pin to page cache */
2560 page_ref_add(head, 2);
2561 xa_unlock(&head->mapping->i_pages);
2562 }
2563 local_irq_enable();
2564
2565 if (nr_dropped)
2566 shmem_uncharge(head->mapping->host, nr_dropped);
2567 remap_page(folio, nr);
2568
2569 if (folio_test_swapcache(folio))
2570 split_swap_cluster(folio->swap);
2571
2572 for (i = 0; i < nr; i++) {
2573 struct page *subpage = head + i;
2574 if (subpage == page)
2575 continue;
2576 unlock_page(subpage);
2577
2578 /*
2579 * Subpages may be freed if there wasn't any mapping,
2580 * for example if add_to_swap() is running on an LRU page
2581 * that had its mapping zapped. Freeing these pages
2582 * requires taking the lru_lock, so we do the put_page
2583 * of the tail pages after the split is complete.
2584 */
2585 free_page_and_swap_cache(subpage);
2586 }
2587 }
2588
2589 /* Racy check whether the huge page can be split */
2590 bool can_split_folio(struct folio *folio, int *pextra_pins)
2591 {
2592 int extra_pins;
2593
2594 /* Additional pins from page cache */
2595 if (folio_test_anon(folio))
2596 extra_pins = folio_test_swapcache(folio) ?
2597 folio_nr_pages(folio) : 0;
2598 else
2599 extra_pins = folio_nr_pages(folio);
2600 if (pextra_pins)
2601 *pextra_pins = extra_pins;
2602 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
2603 }
2604
2605 /*
2606 * This function splits a huge page into normal pages. @page can point to any
2607 * subpage of the huge page to split. Splitting doesn't change the position of @page.
2608 *
2609 * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
2610 * The huge page must be locked.
2611 *
2612 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2613 *
2614 * Both head page and tail pages will inherit mapping, flags, and so on from
2615 * the hugepage.
2616 *
2617 * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
2618 * can be freed if they are not mapped.
2619 *
2620 * Returns 0 if the hugepage is split successfully.
2621 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2622 * us.
2623 */
2624 int split_huge_page_to_list(struct page *page, struct list_head *list)
2625 {
2626 struct folio *folio = page_folio(page);
2627 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2628 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2629 struct anon_vma *anon_vma = NULL;
2630 struct address_space *mapping = NULL;
2631 int extra_pins, ret;
2632 pgoff_t end;
2633 bool is_hzp;
2634
2635 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2636 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2637
2638 is_hzp = is_huge_zero_page(&folio->page);
2639 if (is_hzp) {
2640 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
2641 return -EBUSY;
2642 }
2643
2644 if (folio_test_writeback(folio))
2645 return -EBUSY;
2646
2647 if (folio_test_anon(folio)) {
2648 /*
2649 * The caller does not necessarily hold an mmap_lock that would
2650 * prevent the anon_vma from disappearing, so we first take a
2651 * reference to it and then lock the anon_vma for write. This
2652 * is similar to folio_lock_anon_vma_read except the write lock
2653 * is taken to serialise against parallel split or collapse
2654 * operations.
2655 */
2656 anon_vma = folio_get_anon_vma(folio);
2657 if (!anon_vma) {
2658 ret = -EBUSY;
2659 goto out;
2660 }
2661 end = -1;
2662 mapping = NULL;
2663 anon_vma_lock_write(anon_vma);
2664 } else {
2665 gfp_t gfp;
2666
2667 mapping = folio->mapping;
2668
2669 /* Truncated ? */
2670 if (!mapping) {
2671 ret = -EBUSY;
2672 goto out;
2673 }
2674
2675 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
2676 GFP_RECLAIM_MASK);
2677
2678 if (!filemap_release_folio(folio, gfp)) {
2679 ret = -EBUSY;
2680 goto out;
2681 }
2682
2683 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
2684 if (xas_error(&xas)) {
2685 ret = xas_error(&xas);
2686 goto out;
2687 }
2688
2689 anon_vma = NULL;
2690 i_mmap_lock_read(mapping);
2691
2692 /*
2693 * __split_huge_page() may need to trim off pages beyond EOF:
2694 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2695 * which cannot be nested inside the page tree lock. So note
2696 * end now: i_size itself may be changed at any moment, but
2697 * folio lock is good enough to serialize the trimming.
2698 */
2699 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2700 if (shmem_mapping(mapping))
2701 end = shmem_fallocend(mapping->host, end);
2702 }
2703
2704 /*
2705 * Racy check whether we can split the page, before unmap_folio()
2706 * splits the PMDs.
2707 */
2708 if (!can_split_folio(folio, &extra_pins)) {
2709 ret = -EAGAIN;
2710 goto out_unlock;
2711 }
2712
2713 unmap_folio(folio);
2714
2715 /* block interrupt reentry in xa_lock and spinlock */
2716 local_irq_disable();
2717 if (mapping) {
2718 /*
2719 * Check if the folio is present in page cache.
2720 * We assume all tail pages are present too, if the folio is there.
2721 */
2722 xas_lock(&xas);
2723 xas_reset(&xas);
2724 if (xas_load(&xas) != folio)
2725 goto fail;
2726 }
2727
2728 /* Prevent deferred_split_scan() touching ->_refcount */
2729 spin_lock(&ds_queue->split_queue_lock);
2730 if (folio_ref_freeze(folio, 1 + extra_pins)) {
2731 if (!list_empty(&folio->_deferred_list)) {
2732 ds_queue->split_queue_len--;
2733 list_del(&folio->_deferred_list);
2734 }
2735 spin_unlock(&ds_queue->split_queue_lock);
2736 if (mapping) {
2737 int nr = folio_nr_pages(folio);
2738
2739 xas_split(&xas, folio, folio_order(folio));
2740 if (folio_test_swapbacked(folio)) {
2741 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
2742 -nr);
2743 } else {
2744 __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
2745 -nr);
2746 filemap_nr_thps_dec(mapping);
2747 }
2748 }
2749
2750 __split_huge_page(page, list, end);
2751 ret = 0;
2752 } else {
2753 spin_unlock(&ds_queue->split_queue_lock);
2754 fail:
2755 if (mapping)
2756 xas_unlock(&xas);
2757 local_irq_enable();
2758 remap_page(folio, folio_nr_pages(folio));
2759 ret = -EAGAIN;
2760 }
2761
2762 out_unlock:
2763 if (anon_vma) {
2764 anon_vma_unlock_write(anon_vma);
2765 put_anon_vma(anon_vma);
2766 }
2767 if (mapping)
2768 i_mmap_unlock_read(mapping);
2769 out:
2770 xas_destroy(&xas);
2771 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2772 return ret;
2773 }
2774
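/* Take the folio off its deferred split queue, if it is on one. */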
2775 void folio_undo_large_rmappable(struct folio *folio)
2776 {
2777 struct deferred_split *ds_queue;
2778 unsigned long flags;
2779
2780 /*
2781 * At this point, there is no one trying to add the folio to
2782 * deferred_list. If folio is not in deferred_list, it's safe
2783 * to check without acquiring the split_queue_lock.
2784 */
2785 if (data_race(list_empty(&folio->_deferred_list)))
2786 return;
2787
2788 ds_queue = get_deferred_split_queue(folio);
2789 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2790 if (!list_empty(&folio->_deferred_list)) {
2791 ds_queue->split_queue_len--;
2792 list_del(&folio->_deferred_list);
2793 }
2794 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2795 }
2796
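/*
 * Queue a THP on the deferred split queue so that the shrinker can try
 * to split it later under memory pressure.
 */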
2797 void deferred_split_folio(struct folio *folio)
2798 {
2799 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2800 #ifdef CONFIG_MEMCG
2801 struct mem_cgroup *memcg = folio_memcg(folio);
2802 #endif
2803 unsigned long flags;
2804
2805 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
2806
2807 /*
2808 * The try_to_unmap() in the page reclaim path might reach here too;
2809 * this may cause a race condition that corrupts the deferred split
2810 * queue. And, if page reclaim is already handling the same folio, it
2811 * is unnecessary to handle it again in the shrinker.
2812 *
2813 * Check the swapcache flag to determine if the folio is being
2814 * handled by page reclaim since THP swap would add the folio into
2815 * swap cache before calling try_to_unmap().
2816 */
2817 if (folio_test_swapcache(folio))
2818 return;
2819
2820 if (!list_empty(&folio->_deferred_list))
2821 return;
2822
2823 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2824 if (list_empty(&folio->_deferred_list)) {
2825 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2826 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
2827 ds_queue->split_queue_len++;
2828 #ifdef CONFIG_MEMCG
2829 if (memcg)
2830 set_shrinker_bit(memcg, folio_nid(folio),
2831 deferred_split_shrinker.id);
2832 #endif
2833 }
2834 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2835 }
2836
2837 static unsigned long deferred_split_count(struct shrinker *shrink,
2838 struct shrink_control *sc)
2839 {
2840 struct pglist_data *pgdata = NODE_DATA(sc->nid);
2841 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2842
2843 #ifdef CONFIG_MEMCG
2844 if (sc->memcg)
2845 ds_queue = &sc->memcg->deferred_split_queue;
2846 #endif
2847 return READ_ONCE(ds_queue->split_queue_len);
2848 }
2849
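/*
 * Shrinker scan callback: take folios off the deferred split queue, try
 * to lock and split each one, and requeue those that could not be split.
 */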
2850 static unsigned long deferred_split_scan(struct shrinker *shrink,
2851 struct shrink_control *sc)
2852 {
2853 struct pglist_data *pgdata = NODE_DATA(sc->nid);
2854 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2855 unsigned long flags;
2856 LIST_HEAD(list);
2857 struct folio *folio, *next;
2858 int split = 0;
2859
2860 #ifdef CONFIG_MEMCG
2861 if (sc->memcg)
2862 ds_queue = &sc->memcg->deferred_split_queue;
2863 #endif
2864
2865 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2866 /* Take pin on all head pages to avoid freeing them under us */
2867 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
2868 _deferred_list) {
2869 if (folio_try_get(folio)) {
2870 list_move(&folio->_deferred_list, &list);
2871 } else {
2872 /* We lost race with folio_put() */
2873 list_del_init(&folio->_deferred_list);
2874 ds_queue->split_queue_len--;
2875 }
2876 if (!--sc->nr_to_scan)
2877 break;
2878 }
2879 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2880
2881 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
2882 if (!folio_trylock(folio))
2883 goto next;
2884 /* split_huge_page() removes page from list on success */
2885 if (!split_folio(folio))
2886 split++;
2887 folio_unlock(folio);
2888 next:
2889 folio_put(folio);
2890 }
2891
2892 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2893 list_splice_tail(&list, &ds_queue->split_queue);
2894 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2895
2896 /*
2897 * Stop the shrinker if we didn't split any page and the queue is empty.
2898 * This can happen if pages were freed under us.
2899 */
2900 if (!split && list_empty(&ds_queue->split_queue))
2901 return SHRINK_STOP;
2902 return split;
2903 }
2904
2905 static struct shrinker deferred_split_shrinker = {
2906 .count_objects = deferred_split_count,
2907 .scan_objects = deferred_split_scan,
2908 .seeks = DEFAULT_SEEKS,
2909 .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
2910 SHRINKER_NONSLAB,
2911 };
2912
2913 #ifdef CONFIG_DEBUG_FS
2914 static void split_huge_pages_all(void)
2915 {
2916 struct zone *zone;
2917 struct page *page;
2918 struct folio *folio;
2919 unsigned long pfn, max_zone_pfn;
2920 unsigned long total = 0, split = 0;
2921
2922 pr_debug("Split all THPs\n");
2923 for_each_zone(zone) {
2924 if (!managed_zone(zone))
2925 continue;
2926 max_zone_pfn = zone_end_pfn(zone);
2927 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2928 int nr_pages;
2929
2930 page = pfn_to_online_page(pfn);
2931 if (!page || PageTail(page))
2932 continue;
2933 folio = page_folio(page);
2934 if (!folio_try_get(folio))
2935 continue;
2936
2937 if (unlikely(page_folio(page) != folio))
2938 goto next;
2939
2940 if (zone != folio_zone(folio))
2941 goto next;
2942
2943 if (!folio_test_large(folio)
2944 || folio_test_hugetlb(folio)
2945 || !folio_test_lru(folio))
2946 goto next;
2947
2948 total++;
2949 folio_lock(folio);
2950 nr_pages = folio_nr_pages(folio);
2951 if (!split_folio(folio))
2952 split++;
2953 pfn += nr_pages - 1;
2954 folio_unlock(folio);
2955 next:
2956 folio_put(folio);
2957 cond_resched();
2958 }
2959 }
2960
2961 pr_debug("%lu of %lu THP split\n", split, total);
2962 }
2963
2964 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
2965 {
2966 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
2967 is_vm_hugetlb_page(vma);
2968 }
2969
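/*
 * Split all THPs mapped within [vaddr_start, vaddr_end) of the process
 * identified by @pid (debugfs helper).
 */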
2970 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
2971 unsigned long vaddr_end)
2972 {
2973 int ret = 0;
2974 struct task_struct *task;
2975 struct mm_struct *mm;
2976 unsigned long total = 0, split = 0;
2977 unsigned long addr;
2978
2979 vaddr_start &= PAGE_MASK;
2980 vaddr_end &= PAGE_MASK;
2981
2982 /* Find the task_struct from pid */
2983 rcu_read_lock();
2984 task = find_task_by_vpid(pid);
2985 if (!task) {
2986 rcu_read_unlock();
2987 ret = -ESRCH;
2988 goto out;
2989 }
2990 get_task_struct(task);
2991 rcu_read_unlock();
2992
2993 /* Find the mm_struct */
2994 mm = get_task_mm(task);
2995 put_task_struct(task);
2996
2997 if (!mm) {
2998 ret = -EINVAL;
2999 goto out;
3000 }
3001
3002 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3003 pid, vaddr_start, vaddr_end);
3004
3005 mmap_read_lock(mm);
3006 /*
3007 * always increase addr by PAGE_SIZE, since we could have a PTE page
3008 * table filled with PTE-mapped THPs, each of which is distinct.
3009 */
3010 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3011 struct vm_area_struct *vma = vma_lookup(mm, addr);
3012 struct page *page;
3013 struct folio *folio;
3014
3015 if (!vma)
3016 break;
3017
3018 /* skip special VMA and hugetlb VMA */
3019 if (vma_not_suitable_for_thp_split(vma)) {
3020 addr = vma->vm_end;
3021 continue;
3022 }
3023
3024 /* FOLL_DUMP to ignore special (like zero) pages */
3025 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3026
3027 if (IS_ERR_OR_NULL(page))
3028 continue;
3029
3030 folio = page_folio(page);
3031 if (!is_transparent_hugepage(folio))
3032 goto next;
3033
3034 total++;
3035 if (!can_split_folio(folio, NULL))
3036 goto next;
3037
3038 if (!folio_trylock(folio))
3039 goto next;
3040
3041 if (!split_folio(folio))
3042 split++;
3043
3044 folio_unlock(folio);
3045 next:
3046 folio_put(folio);
3047 cond_resched();
3048 }
3049 mmap_read_unlock(mm);
3050 mmput(mm);
3051
3052 pr_debug("%lu of %lu THP split\n", split, total);
3053
3054 out:
3055 return ret;
3056 }
3057
3058 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3059 pgoff_t off_end)
3060 {
3061 struct filename *file;
3062 struct file *candidate;
3063 struct address_space *mapping;
3064 int ret = -EINVAL;
3065 pgoff_t index;
3066 int nr_pages = 1;
3067 unsigned long total = 0, split = 0;
3068
3069 file = getname_kernel(file_path);
3070 if (IS_ERR(file))
3071 return ret;
3072
3073 candidate = file_open_name(file, O_RDONLY, 0);
3074 if (IS_ERR(candidate))
3075 goto out;
3076
3077 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3078 file_path, off_start, off_end);
3079
3080 mapping = candidate->f_mapping;
3081
3082 for (index = off_start; index < off_end; index += nr_pages) {
3083 struct folio *folio = filemap_get_folio(mapping, index);
3084
3085 nr_pages = 1;
3086 if (IS_ERR(folio))
3087 continue;
3088
3089 if (!folio_test_large(folio))
3090 goto next;
3091
3092 total++;
3093 nr_pages = folio_nr_pages(folio);
3094
3095 if (!folio_trylock(folio))
3096 goto next;
3097
3098 if (!split_folio(folio))
3099 split++;
3100
3101 folio_unlock(folio);
3102 next:
3103 folio_put(folio);
3104 cond_resched();
3105 }
3106
3107 filp_close(candidate, NULL);
3108 ret = 0;
3109
3110 pr_debug("%lu of %lu file-backed THP split\n", split, total);
3111 out:
3112 putname(file);
3113 return ret;
3114 }
3115
3116 #define MAX_INPUT_BUF_SZ 255
3117
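/*
 * debugfs write handler. Accepts "<pid>,<vaddr_start>,<vaddr_end>" (hex
 * addresses) to split THPs in one process, "<path>,<off_start>,<off_end>"
 * to split file-backed THPs by page offset, or a bare "1" to split every
 * THP in the system, e.g.:
 *
 *	echo "1234,0x7f0000000000,0x7f0000200000" > \
 *		/sys/kernel/debug/split_huge_pages
 */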
3118 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3119 size_t count, loff_t *ppops)
3120 {
3121 static DEFINE_MUTEX(split_debug_mutex);
3122 ssize_t ret;
3123 /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3124 char input_buf[MAX_INPUT_BUF_SZ];
3125 int pid;
3126 unsigned long vaddr_start, vaddr_end;
3127
3128 ret = mutex_lock_interruptible(&split_debug_mutex);
3129 if (ret)
3130 return ret;
3131
3132 ret = -EFAULT;
3133
3134 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3135 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3136 goto out;
3137
3138 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3139
3140 if (input_buf[0] == '/') {
3141 char *tok;
3142 char *buf = input_buf;
3143 char file_path[MAX_INPUT_BUF_SZ];
3144 pgoff_t off_start = 0, off_end = 0;
3145 size_t input_len = strlen(input_buf);
3146
3147 tok = strsep(&buf, ",");
3148 if (tok) {
3149 strcpy(file_path, tok);
3150 } else {
3151 ret = -EINVAL;
3152 goto out;
3153 }
3154
3155 ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3156 if (ret != 2) {
3157 ret = -EINVAL;
3158 goto out;
3159 }
3160 ret = split_huge_pages_in_file(file_path, off_start, off_end);
3161 if (!ret)
3162 ret = input_len;
3163
3164 goto out;
3165 }
3166
3167 ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3168 if (ret == 1 && pid == 1) {
3169 split_huge_pages_all();
3170 ret = strlen(input_buf);
3171 goto out;
3172 } else if (ret != 3) {
3173 ret = -EINVAL;
3174 goto out;
3175 }
3176
3177 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3178 if (!ret)
3179 ret = strlen(input_buf);
3180 out:
3181 mutex_unlock(&split_debug_mutex);
3182 return ret;
3184 }
3185
3186 static const struct file_operations split_huge_pages_fops = {
3187 .owner = THIS_MODULE,
3188 .write = split_huge_pages_write,
3189 .llseek = no_llseek,
3190 };
3191
3192 static int __init split_huge_pages_debugfs(void)
3193 {
3194 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3195 &split_huge_pages_fops);
3196 return 0;
3197 }
3198 late_initcall(split_huge_pages_debugfs);
3199 #endif
3200
3201 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
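/*
 * Replace the present huge pmd mapping @page with a pmd migration entry.
 * Returns -EBUSY if an anonymous page cannot give up its exclusive
 * marker (see page_try_share_anon_rmap()), 0 on success or when there
 * was nothing to do.
 */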
3202 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3203 struct page *page)
3204 {
3205 struct vm_area_struct *vma = pvmw->vma;
3206 struct mm_struct *mm = vma->vm_mm;
3207 unsigned long address = pvmw->address;
3208 bool anon_exclusive;
3209 pmd_t pmdval;
3210 swp_entry_t entry;
3211 pmd_t pmdswp;
3212
3213 if (!(pvmw->pmd && !pvmw->pte))
3214 return 0;
3215
3216 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3217 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3218
3219 /* See page_try_share_anon_rmap(): invalidate PMD first. */
3220 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
3221 if (anon_exclusive && page_try_share_anon_rmap(page)) {
3222 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3223 return -EBUSY;
3224 }
3225
3226 if (pmd_dirty(pmdval))
3227 set_page_dirty(page);
3228 if (pmd_write(pmdval))
3229 entry = make_writable_migration_entry(page_to_pfn(page));
3230 else if (anon_exclusive)
3231 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3232 else
3233 entry = make_readable_migration_entry(page_to_pfn(page));
3234 if (pmd_young(pmdval))
3235 entry = make_migration_entry_young(entry);
3236 if (pmd_dirty(pmdval))
3237 entry = make_migration_entry_dirty(entry);
3238 pmdswp = swp_entry_to_pmd(entry);
3239 if (pmd_soft_dirty(pmdval))
3240 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3241 if (pmd_uffd_wp(pmdval))
3242 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
3243 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3244 page_remove_rmap(page, vma, true);
3245 put_page(page);
3246 trace_set_migration_pmd(address, pmd_val(pmdswp));
3247
3248 return 0;
3249 }
3250
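/*
 * Restore a huge pmd for @new once THP migration has finished: rebuild a
 * present pmd from the migration entry, re-establish the rmap and
 * install the new entry.
 */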
3251 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3252 {
3253 struct vm_area_struct *vma = pvmw->vma;
3254 struct mm_struct *mm = vma->vm_mm;
3255 unsigned long address = pvmw->address;
3256 unsigned long haddr = address & HPAGE_PMD_MASK;
3257 pmd_t pmde;
3258 swp_entry_t entry;
3259
3260 if (!(pvmw->pmd && !pvmw->pte))
3261 return;
3262
3263 entry = pmd_to_swp_entry(*pvmw->pmd);
3264 get_page(new);
3265 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3266 if (pmd_swp_soft_dirty(*pvmw->pmd))
3267 pmde = pmd_mksoft_dirty(pmde);
3268 if (is_writable_migration_entry(entry))
3269 pmde = pmd_mkwrite(pmde, vma);
3270 if (pmd_swp_uffd_wp(*pvmw->pmd))
3271 pmde = pmd_mkuffd_wp(pmde);
3272 if (!is_migration_entry_young(entry))
3273 pmde = pmd_mkold(pmde);
3274 /* NOTE: this may contain setting soft-dirty on some archs */
3275 if (PageDirty(new) && is_migration_entry_dirty(entry))
3276 pmde = pmd_mkdirty(pmde);
3277
3278 if (PageAnon(new)) {
3279 rmap_t rmap_flags = RMAP_COMPOUND;
3280
3281 if (!is_readable_migration_entry(entry))
3282 rmap_flags |= RMAP_EXCLUSIVE;
3283
3284 page_add_anon_rmap(new, vma, haddr, rmap_flags);
3285 } else {
3286 page_add_file_rmap(new, vma, true);
3287 }
3288 VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
3289 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3290
3291 /* No need to invalidate - it was non-present before */
3292 update_mmu_cache_pmd(vma, address, pvmw->pmd);
3293 trace_remove_migration_pmd(address, pmd_val(pmde));
3294 }
3295 #endif
3296