1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/mm/madvise.c
4 *
5 * Copyright (C) 1999 Linus Torvalds
6 * Copyright (C) 2002 Christoph Hellwig
7 */
8
9 #include <linux/mman.h>
10 #include <linux/pagemap.h>
11 #include <linux/syscalls.h>
12 #include <linux/mempolicy.h>
13 #include <linux/page-isolation.h>
14 #include <linux/page_idle.h>
15 #include <linux/userfaultfd_k.h>
16 #include <linux/hugetlb.h>
17 #include <linux/falloc.h>
18 #include <linux/fadvise.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
21 #include <linux/mm_inline.h>
22 #include <linux/string.h>
23 #include <linux/uio.h>
24 #include <linux/ksm.h>
25 #include <linux/fs.h>
26 #include <linux/file.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagewalk.h>
30 #include <linux/swap.h>
31 #include <linux/swapops.h>
32 #include <linux/shmem_fs.h>
33 #include <linux/mmu_notifier.h>
34
35 #include <asm/tlb.h>
36
37 #include "internal.h"
38 #include "swap.h"
39
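/*
 * Private state for the MADV_COLD / MADV_PAGEOUT page table walk:
 * @tlb batches the TLB flushes for the access bits cleared during the
 * walk, and @pageout selects whether pages are reclaimed immediately
 * (MADV_PAGEOUT) or merely deactivated (MADV_COLD).
 */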
40 struct madvise_walk_private {
41 struct mmu_gather *tlb;
42 bool pageout;
43 };
44
45 /*
46 * Any behaviour which results in changes to the vma->vm_flags needs to
47 * take mmap_lock for writing. Others, which simply traverse vmas, need
48 * to only take it for reading.
49 */
50 static int madvise_need_mmap_write(int behavior)
51 {
52 switch (behavior) {
53 case MADV_REMOVE:
54 case MADV_WILLNEED:
55 case MADV_DONTNEED:
56 case MADV_DONTNEED_LOCKED:
57 case MADV_COLD:
58 case MADV_PAGEOUT:
59 case MADV_FREE:
60 case MADV_POPULATE_READ:
61 case MADV_POPULATE_WRITE:
62 case MADV_COLLAPSE:
63 return 0;
64 default:
65 /* be safe, default to 1. list exceptions explicitly */
66 return 1;
67 }
68 }
69
70 #ifdef CONFIG_ANON_VMA_NAME
71 struct anon_vma_name *anon_vma_name_alloc(const char *name)
72 {
73 struct anon_vma_name *anon_name;
74 size_t count;
75
76 /* Add 1 for NUL terminator at the end of the anon_name->name */
77 count = strlen(name) + 1;
78 anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
79 if (anon_name) {
80 kref_init(&anon_name->kref);
81 memcpy(anon_name->name, name, count);
82 }
83
84 return anon_name;
85 }
86
87 void anon_vma_name_free(struct kref *kref)
88 {
89 struct anon_vma_name *anon_name =
90 container_of(kref, struct anon_vma_name, kref);
91 kfree(anon_name);
92 }
93
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
95 {
96 mmap_assert_locked(vma->vm_mm);
97
98 if (vma->vm_file)
99 return NULL;
100
101 return vma->anon_name;
102 }
103
104 /* mmap_lock should be write-locked */
105 static int replace_anon_vma_name(struct vm_area_struct *vma,
106 struct anon_vma_name *anon_name)
107 {
108 struct anon_vma_name *orig_name = anon_vma_name(vma);
109
110 if (!anon_name) {
111 vma->anon_name = NULL;
112 anon_vma_name_put(orig_name);
113 return 0;
114 }
115
116 if (anon_vma_name_eq(orig_name, anon_name))
117 return 0;
118
119 vma->anon_name = anon_vma_name_reuse(anon_name);
120 anon_vma_name_put(orig_name);
121
122 return 0;
123 }
124 #else /* CONFIG_ANON_VMA_NAME */
125 static int replace_anon_vma_name(struct vm_area_struct *vma,
126 struct anon_vma_name *anon_name)
127 {
128 if (anon_name)
129 return -EINVAL;
130
131 return 0;
132 }
133 #endif /* CONFIG_ANON_VMA_NAME */
134 /*
135 * Update the vm_flags on a region of a vma, splitting or merging it as
136 * necessary. Must be called with mmap_lock held for writing.
137 * Caller should ensure anon_name stability by raising its refcount even when
138 * anon_name belongs to a valid vma because this function might free that vma.
139 */
140 static int madvise_update_vma(struct vm_area_struct *vma,
141 struct vm_area_struct **prev, unsigned long start,
142 unsigned long end, unsigned long new_flags,
143 struct anon_vma_name *anon_name)
144 {
145 struct mm_struct *mm = vma->vm_mm;
146 int error;
147 pgoff_t pgoff;
148
149 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
150 *prev = vma;
151 return 0;
152 }
153
154 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
155 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
156 vma->vm_file, pgoff, vma_policy(vma),
157 vma->vm_userfaultfd_ctx, anon_name);
158 if (*prev) {
159 vma = *prev;
160 goto success;
161 }
162
163 *prev = vma;
164
165 if (start != vma->vm_start) {
166 if (unlikely(mm->map_count >= sysctl_max_map_count))
167 return -ENOMEM;
168 error = __split_vma(mm, vma, start, 1);
169 if (error)
170 return error;
171 }
172
173 if (end != vma->vm_end) {
174 if (unlikely(mm->map_count >= sysctl_max_map_count))
175 return -ENOMEM;
176 error = __split_vma(mm, vma, end, 0);
177 if (error)
178 return error;
179 }
180
181 success:
182 /*
183 * vm_flags is protected by the mmap_lock held in write mode.
184 */
185 vma->vm_flags = new_flags;
186 if (!vma->vm_file) {
187 error = replace_anon_vma_name(vma, anon_name);
188 if (error)
189 return error;
190 }
191
192 return 0;
193 }
194
195 #ifdef CONFIG_SWAP
196 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
197 unsigned long end, struct mm_walk *walk)
198 {
199 struct vm_area_struct *vma = walk->private;
200 unsigned long index;
201 struct swap_iocb *splug = NULL;
202
203 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
204 return 0;
205
206 for (index = start; index != end; index += PAGE_SIZE) {
207 pte_t pte;
208 swp_entry_t entry;
209 struct page *page;
210 spinlock_t *ptl;
211 pte_t *ptep;
212
213 ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
214 pte = *ptep;
215 pte_unmap_unlock(ptep, ptl);
216
217 if (!is_swap_pte(pte))
218 continue;
219 entry = pte_to_swp_entry(pte);
220 if (unlikely(non_swap_entry(entry)))
221 continue;
222
223 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
224 vma, index, false, &splug);
225 if (page)
226 put_page(page);
227 }
228 swap_read_unplug(splug);
229
230 return 0;
231 }
232
233 static const struct mm_walk_ops swapin_walk_ops = {
234 .pmd_entry = swapin_walk_pmd_entry,
235 };
236
237 static void force_shm_swapin_readahead(struct vm_area_struct *vma,
238 unsigned long start, unsigned long end,
239 struct address_space *mapping)
240 {
241 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
242 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
243 struct page *page;
244 struct swap_iocb *splug = NULL;
245
246 rcu_read_lock();
247 xas_for_each(&xas, page, end_index) {
248 swp_entry_t swap;
249
250 if (!xa_is_value(page))
251 continue;
252 swap = radix_to_swp_entry(page);
253 /* There might be swapin error entries in shmem mapping. */
254 if (non_swap_entry(swap))
255 continue;
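/*
 * Drop the RCU read lock so read_swap_cache_async() may sleep;
 * xas_pause() lets the XArray walk resume from this position once
 * the lock is re-taken.
 */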
256 xas_pause(&xas);
257 rcu_read_unlock();
258
259 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
260 NULL, 0, false, &splug);
261 if (page)
262 put_page(page);
263
264 rcu_read_lock();
265 }
266 rcu_read_unlock();
267 swap_read_unplug(splug);
268
269 lru_add_drain(); /* Push any new pages onto the LRU now */
270 }
271 #endif /* CONFIG_SWAP */
272
273 /*
274 * Schedule all required I/O operations. Do not wait for completion.
275 */
276 static long madvise_willneed(struct vm_area_struct *vma,
277 struct vm_area_struct **prev,
278 unsigned long start, unsigned long end)
279 {
280 struct mm_struct *mm = vma->vm_mm;
281 struct file *file = vma->vm_file;
282 loff_t offset;
283
284 *prev = vma;
285 #ifdef CONFIG_SWAP
286 if (!file) {
287 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
288 lru_add_drain(); /* Push any new pages onto the LRU now */
289 return 0;
290 }
291
292 if (shmem_mapping(file->f_mapping)) {
293 force_shm_swapin_readahead(vma, start, end,
294 file->f_mapping);
295 return 0;
296 }
297 #else
298 if (!file)
299 return -EBADF;
300 #endif
301
302 if (IS_DAX(file_inode(file))) {
303 /* no bad return value, but ignore advice */
304 return 0;
305 }
306
307 /*
308 * Filesystem's fadvise may need to take various locks. We need to
309 * explicitly grab a reference because the vma (and hence the
310 * vma's reference to the file) can go away as soon as we drop
311 * mmap_lock.
312 */
313 *prev = NULL; /* tell sys_madvise we drop mmap_lock */
314 get_file(file);
315 offset = (loff_t)(start - vma->vm_start)
316 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
317 mmap_read_unlock(mm);
318 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
319 fput(file);
320 mmap_read_lock(mm);
321 return 0;
322 }
323
324 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
325 unsigned long addr, unsigned long end,
326 struct mm_walk *walk)
327 {
328 struct madvise_walk_private *private = walk->private;
329 struct mmu_gather *tlb = private->tlb;
330 bool pageout = private->pageout;
331 struct mm_struct *mm = tlb->mm;
332 struct vm_area_struct *vma = walk->vma;
333 pte_t *orig_pte, *pte, ptent;
334 spinlock_t *ptl;
335 struct page *page = NULL;
336 LIST_HEAD(page_list);
337
338 if (fatal_signal_pending(current))
339 return -EINTR;
340
341 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
342 if (pmd_trans_huge(*pmd)) {
343 pmd_t orig_pmd;
344 unsigned long next = pmd_addr_end(addr, end);
345
346 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
347 ptl = pmd_trans_huge_lock(pmd, vma);
348 if (!ptl)
349 return 0;
350
351 orig_pmd = *pmd;
352 if (is_huge_zero_pmd(orig_pmd))
353 goto huge_unlock;
354
355 if (unlikely(!pmd_present(orig_pmd))) {
356 VM_BUG_ON(thp_migration_supported() &&
357 !is_pmd_migration_entry(orig_pmd));
358 goto huge_unlock;
359 }
360
361 page = pmd_page(orig_pmd);
362
363 /* Do not interfere with other mappings of this page */
364 if (page_mapcount(page) != 1)
365 goto huge_unlock;
366
367 if (next - addr != HPAGE_PMD_SIZE) {
368 int err;
369
370 get_page(page);
371 spin_unlock(ptl);
372 lock_page(page);
373 err = split_huge_page(page);
374 unlock_page(page);
375 put_page(page);
376 if (!err)
377 goto regular_page;
378 return 0;
379 }
380
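/*
 * If the huge pmd is still young, clear its access bit so reclaim
 * sees the page as cold; the stale TLB entry is flushed through the
 * mmu_gather.
 */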
381 if (pmd_young(orig_pmd)) {
382 pmdp_invalidate(vma, addr, pmd);
383 orig_pmd = pmd_mkold(orig_pmd);
384
385 set_pmd_at(mm, addr, pmd, orig_pmd);
386 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
387 }
388
389 ClearPageReferenced(page);
390 test_and_clear_page_young(page);
391 if (pageout) {
392 if (!isolate_lru_page(page)) {
393 if (PageUnevictable(page))
394 putback_lru_page(page);
395 else
396 list_add(&page->lru, &page_list);
397 }
398 } else
399 deactivate_page(page);
400 huge_unlock:
401 spin_unlock(ptl);
402 if (pageout)
403 reclaim_pages(&page_list);
404 return 0;
405 }
406
407 regular_page:
408 if (pmd_trans_unstable(pmd))
409 return 0;
410 #endif
411 tlb_change_page_size(tlb, PAGE_SIZE);
412 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
413 flush_tlb_batched_pending(mm);
414 arch_enter_lazy_mmu_mode();
415 for (; addr < end; pte++, addr += PAGE_SIZE) {
416 ptent = *pte;
417
418 if (pte_none(ptent))
419 continue;
420
421 if (!pte_present(ptent))
422 continue;
423
424 page = vm_normal_page(vma, addr, ptent);
425 if (!page || is_zone_device_page(page))
426 continue;
427
428 /*
429 * Creating a THP page is expensive, so split it only if we
430 * are sure it's worth it. Split it only if we are the sole owner.
431 */
432 if (PageTransCompound(page)) {
433 if (page_mapcount(page) != 1)
434 break;
435 get_page(page);
436 if (!trylock_page(page)) {
437 put_page(page);
438 break;
439 }
440 pte_unmap_unlock(orig_pte, ptl);
441 if (split_huge_page(page)) {
442 unlock_page(page);
443 put_page(page);
444 orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
445 break;
446 }
447 unlock_page(page);
448 put_page(page);
449 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
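/*
 * Step back one entry so the next iteration re-examines this
 * address, which is now mapped by a base page after the split.
 */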
450 pte--;
451 addr -= PAGE_SIZE;
452 continue;
453 }
454
455 /*
456 * Do not interfere with other mappings of this page, and
457 * skip non-LRU pages.
458 */
459 if (!PageLRU(page) || page_mapcount(page) != 1)
460 continue;
461
462 VM_BUG_ON_PAGE(PageTransCompound(page), page);
463
464 if (pte_young(ptent)) {
465 ptent = ptep_get_and_clear_full(mm, addr, pte,
466 tlb->fullmm);
467 ptent = pte_mkold(ptent);
468 set_pte_at(mm, addr, pte, ptent);
469 tlb_remove_tlb_entry(tlb, pte, addr);
470 }
471
472 /*
473 * We are deactivating a page to accelerate its reclaim.
474 * The VM cannot reclaim the page unless we clear PG_young.
475 * As a side effect, this confuses idle-page tracking
476 * because it will miss the recent reference history.
477 */
478 ClearPageReferenced(page);
479 test_and_clear_page_young(page);
480 if (pageout) {
481 if (!isolate_lru_page(page)) {
482 if (PageUnevictable(page))
483 putback_lru_page(page);
484 else
485 list_add(&page->lru, &page_list);
486 }
487 } else
488 deactivate_page(page);
489 }
490
491 arch_leave_lazy_mmu_mode();
492 pte_unmap_unlock(orig_pte, ptl);
493 if (pageout)
494 reclaim_pages(&page_list);
495 cond_resched();
496
497 return 0;
498 }
499
500 static const struct mm_walk_ops cold_walk_ops = {
501 .pmd_entry = madvise_cold_or_pageout_pte_range,
502 };
503
504 static void madvise_cold_page_range(struct mmu_gather *tlb,
505 struct vm_area_struct *vma,
506 unsigned long addr, unsigned long end)
507 {
508 struct madvise_walk_private walk_private = {
509 .pageout = false,
510 .tlb = tlb,
511 };
512
513 tlb_start_vma(tlb, vma);
514 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
515 tlb_end_vma(tlb, vma);
516 }
517
518 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
519 {
520 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
521 }
522
523 static long madvise_cold(struct vm_area_struct *vma,
524 struct vm_area_struct **prev,
525 unsigned long start_addr, unsigned long end_addr)
526 {
527 struct mm_struct *mm = vma->vm_mm;
528 struct mmu_gather tlb;
529
530 *prev = vma;
531 if (!can_madv_lru_vma(vma))
532 return -EINVAL;
533
534 lru_add_drain();
535 tlb_gather_mmu(&tlb, mm);
536 madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
537 tlb_finish_mmu(&tlb);
538
539 return 0;
540 }
541
542 static void madvise_pageout_page_range(struct mmu_gather *tlb,
543 struct vm_area_struct *vma,
544 unsigned long addr, unsigned long end)
545 {
546 struct madvise_walk_private walk_private = {
547 .pageout = true,
548 .tlb = tlb,
549 };
550
551 tlb_start_vma(tlb, vma);
552 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
553 tlb_end_vma(tlb, vma);
554 }
555
556 static inline bool can_do_pageout(struct vm_area_struct *vma)
557 {
558 if (vma_is_anonymous(vma))
559 return true;
560 if (!vma->vm_file)
561 return false;
562 /*
563 * paging out pagecache only for non-anonymous mappings that correspond
564 * to files the calling process could (if it tried) open for writing;
565 * otherwise we'd be including shared non-exclusive mappings, which
566 * opens a side channel.
567 */
568 return inode_owner_or_capable(&init_user_ns,
569 file_inode(vma->vm_file)) ||
570 file_permission(vma->vm_file, MAY_WRITE) == 0;
571 }
572
573 static long madvise_pageout(struct vm_area_struct *vma,
574 struct vm_area_struct **prev,
575 unsigned long start_addr, unsigned long end_addr)
576 {
577 struct mm_struct *mm = vma->vm_mm;
578 struct mmu_gather tlb;
579
580 *prev = vma;
581 if (!can_madv_lru_vma(vma))
582 return -EINVAL;
583
584 if (!can_do_pageout(vma))
585 return 0;
586
587 lru_add_drain();
588 tlb_gather_mmu(&tlb, mm);
589 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
590 tlb_finish_mmu(&tlb);
591
592 return 0;
593 }
594
595 static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
596 unsigned long end, struct mm_walk *walk)
597
598 {
599 struct mmu_gather *tlb = walk->private;
600 struct mm_struct *mm = tlb->mm;
601 struct vm_area_struct *vma = walk->vma;
602 spinlock_t *ptl;
603 pte_t *orig_pte, *pte, ptent;
604 struct folio *folio;
605 struct page *page;
606 int nr_swap = 0;
607 unsigned long next;
608
609 next = pmd_addr_end(addr, end);
610 if (pmd_trans_huge(*pmd))
611 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
612 goto next;
613
614 if (pmd_trans_unstable(pmd))
615 return 0;
616
617 tlb_change_page_size(tlb, PAGE_SIZE);
618 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
619 flush_tlb_batched_pending(mm);
620 arch_enter_lazy_mmu_mode();
621 for (; addr != end; pte++, addr += PAGE_SIZE) {
622 ptent = *pte;
623
624 if (pte_none(ptent))
625 continue;
626 /*
627 * If the pte holds a swap entry, just clear the page table
628 * entry to prevent a swap-in, which is more expensive than
629 * (page allocation + zeroing).
630 */
631 if (!pte_present(ptent)) {
632 swp_entry_t entry;
633
634 entry = pte_to_swp_entry(ptent);
635 if (!non_swap_entry(entry)) {
636 nr_swap--;
637 free_swap_and_cache(entry);
638 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
639 } else if (is_hwpoison_entry(entry) ||
640 is_swapin_error_entry(entry)) {
641 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
642 }
643 continue;
644 }
645
646 page = vm_normal_page(vma, addr, ptent);
647 if (!page || is_zone_device_page(page))
648 continue;
649 folio = page_folio(page);
650
651 /*
652 * If pmd isn't transhuge but the folio is large and
653 * is owned by only this process, split it and
654 * deactivate all pages.
655 */
656 if (folio_test_large(folio)) {
657 if (folio_mapcount(folio) != 1)
658 goto out;
659 folio_get(folio);
660 if (!folio_trylock(folio)) {
661 folio_put(folio);
662 goto out;
663 }
664 pte_unmap_unlock(orig_pte, ptl);
665 if (split_folio(folio)) {
666 folio_unlock(folio);
667 folio_put(folio);
668 orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
669 goto out;
670 }
671 folio_unlock(folio);
672 folio_put(folio);
673 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
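/*
 * Step back one entry so the next iteration re-examines this
 * address, which is now mapped by a base page after the split.
 */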
674 pte--;
675 addr -= PAGE_SIZE;
676 continue;
677 }
678
679 if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
680 if (!folio_trylock(folio))
681 continue;
682 /*
683 * If folio is shared with others, we mustn't clear
684 * the folio's dirty flag.
685 */
686 if (folio_mapcount(folio) != 1) {
687 folio_unlock(folio);
688 continue;
689 }
690
691 if (folio_test_swapcache(folio) &&
692 !folio_free_swap(folio)) {
693 folio_unlock(folio);
694 continue;
695 }
696
697 folio_clear_dirty(folio);
698 folio_unlock(folio);
699 }
700
701 if (pte_young(ptent) || pte_dirty(ptent)) {
702 /*
703 * Some architectures (e.g. PPC) don't update the TLB
704 * with set_pte_at() and tlb_remove_tlb_entry(), so for
705 * portability, re-install the pte as old and clean
706 * after clearing it.
707 */
708 ptent = ptep_get_and_clear_full(mm, addr, pte,
709 tlb->fullmm);
710
711 ptent = pte_mkold(ptent);
712 ptent = pte_mkclean(ptent);
713 set_pte_at(mm, addr, pte, ptent);
714 tlb_remove_tlb_entry(tlb, pte, addr);
715 }
716 mark_page_lazyfree(&folio->page);
717 }
718 out:
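/*
 * nr_swap was decremented once per swap entry freed above; fold the
 * (negative) delta into the MM_SWAPENTS counter.
 */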
719 if (nr_swap) {
720 if (current->mm == mm)
721 sync_mm_rss(mm);
722
723 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
724 }
725 arch_leave_lazy_mmu_mode();
726 pte_unmap_unlock(orig_pte, ptl);
727 cond_resched();
728 next:
729 return 0;
730 }
731
732 static const struct mm_walk_ops madvise_free_walk_ops = {
733 .pmd_entry = madvise_free_pte_range,
734 };
735
736 static int madvise_free_single_vma(struct vm_area_struct *vma,
737 unsigned long start_addr, unsigned long end_addr)
738 {
739 struct mm_struct *mm = vma->vm_mm;
740 struct mmu_notifier_range range;
741 struct mmu_gather tlb;
742
743 /* MADV_FREE works only for anonymous vmas at the moment */
744 if (!vma_is_anonymous(vma))
745 return -EINVAL;
746
747 range.start = max(vma->vm_start, start_addr);
748 if (range.start >= vma->vm_end)
749 return -EINVAL;
750 range.end = min(vma->vm_end, end_addr);
751 if (range.end <= vma->vm_start)
752 return -EINVAL;
753 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
754 range.start, range.end);
755
756 lru_add_drain();
757 tlb_gather_mmu(&tlb, mm);
758 update_hiwater_rss(mm);
759
760 mmu_notifier_invalidate_range_start(&range);
761 tlb_start_vma(&tlb, vma);
762 walk_page_range(vma->vm_mm, range.start, range.end,
763 &madvise_free_walk_ops, &tlb);
764 tlb_end_vma(&tlb, vma);
765 mmu_notifier_invalidate_range_end(&range);
766 tlb_finish_mmu(&tlb);
767
768 return 0;
769 }
770
771 /*
772 * Application no longer needs these pages. If the pages are dirty,
773 * it's OK to just throw them away. The app will be more careful about
774 * data it wants to keep. Be sure to free swap resources too. The
775 * zap_page_range_single call sets things up for shrink_active_list to actually
776 * free these pages later if no one else has touched them in the meantime,
777 * although we could add these pages to a global reuse list for
778 * shrink_active_list to pick up before reclaiming other pages.
779 *
780 * NB: This interface discards data rather than pushes it out to swap,
781 * as some implementations do. This has performance implications for
782 * applications like large transactional databases which want to discard
783 * pages in anonymous maps after committing to backing store the data
784 * that was kept in them. There is no reason to write this data out to
785 * the swap area if the application is discarding it.
786 *
787 * An interface that causes the system to free clean pages and flush
788 * dirty pages is already available as msync(MS_INVALIDATE).
789 */
790 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
791 unsigned long start, unsigned long end)
792 {
793 zap_page_range_single(vma, start, end - start, NULL);
794 return 0;
795 }
796
797 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
798 unsigned long start,
799 unsigned long *end,
800 int behavior)
801 {
802 if (!is_vm_hugetlb_page(vma)) {
803 unsigned int forbidden = VM_PFNMAP;
804
805 if (behavior != MADV_DONTNEED_LOCKED)
806 forbidden |= VM_LOCKED;
807
808 return !(vma->vm_flags & forbidden);
809 }
810
811 if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
812 return false;
813 if (start & ~huge_page_mask(hstate_vma(vma)))
814 return false;
815
816 /*
817 * Madvise callers expect the length to be rounded up to PAGE_SIZE
818 * boundaries, and may be unaware that this VMA uses huge pages.
819 * Avoid unexpected data loss by rounding down the number of
820 * huge pages freed.
821 */
822 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));
823
824 return true;
825 }
826
827 static long madvise_dontneed_free(struct vm_area_struct *vma,
828 struct vm_area_struct **prev,
829 unsigned long start, unsigned long end,
830 int behavior)
831 {
832 struct mm_struct *mm = vma->vm_mm;
833
834 *prev = vma;
835 if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
836 return -EINVAL;
837
838 if (start == end)
839 return 0;
840
841 if (!userfaultfd_remove(vma, start, end)) {
842 *prev = NULL; /* mmap_lock has been dropped, prev is stale */
843
844 mmap_read_lock(mm);
845 vma = find_vma(mm, start);
846 if (!vma)
847 return -ENOMEM;
848 if (start < vma->vm_start) {
849 /*
850 * This "vma" under revalidation is the one
851 * with the lowest vma->vm_start where start
852 * is also < vma->vm_end. If start <
853 * vma->vm_start it means a hole materialized
854 * in the user address space within the
855 * virtual range passed to MADV_DONTNEED
856 * or MADV_FREE.
857 */
858 return -ENOMEM;
859 }
860 /*
861 * Potential end adjustment for hugetlb vma is OK as
862 * the check below keeps end within vma.
863 */
864 if (!madvise_dontneed_free_valid_vma(vma, start, &end,
865 behavior))
866 return -EINVAL;
867 if (end > vma->vm_end) {
868 /*
869 * Don't fail if end > vma->vm_end. If the old
870 * vma was split while the mmap_lock was
871 * released, the effect of the concurrent
872 * operation may not cause madvise() to
873 * have an undefined result. There may be an
874 * adjacent next vma that we'll walk
875 * next. userfaultfd_remove() will generate an
876 * UFFD_EVENT_REMOVE repetition on the
877 * end-vma->vm_end range, but the manager can
878 * handle a repetition fine.
879 */
880 end = vma->vm_end;
881 }
882 VM_WARN_ON(start >= end);
883 }
884
885 if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
886 return madvise_dontneed_single_vma(vma, start, end);
887 else if (behavior == MADV_FREE)
888 return madvise_free_single_vma(vma, start, end);
889 else
890 return -EINVAL;
891 }
892
893 static long madvise_populate(struct vm_area_struct *vma,
894 struct vm_area_struct **prev,
895 unsigned long start, unsigned long end,
896 int behavior)
897 {
898 const bool write = behavior == MADV_POPULATE_WRITE;
899 struct mm_struct *mm = vma->vm_mm;
900 unsigned long tmp_end;
901 int locked = 1;
902 long pages;
903
904 *prev = vma;
905
906 while (start < end) {
907 /*
908 * We might have temporarily dropped the lock. For example,
909 * our VMA might have been split.
910 */
911 if (!vma || start >= vma->vm_end) {
912 vma = vma_lookup(mm, start);
913 if (!vma)
914 return -ENOMEM;
915 }
916
917 tmp_end = min_t(unsigned long, end, vma->vm_end);
918 /* Populate (prefault) page tables readable/writable. */
919 pages = faultin_vma_page_range(vma, start, tmp_end, write,
920 &locked);
921 if (!locked) {
922 mmap_read_lock(mm);
923 locked = 1;
924 *prev = NULL;
925 vma = NULL;
926 }
927 if (pages < 0) {
928 switch (pages) {
929 case -EINTR:
930 return -EINTR;
931 case -EINVAL: /* Incompatible mappings / permissions. */
932 return -EINVAL;
933 case -EHWPOISON:
934 return -EHWPOISON;
935 case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
936 return -EFAULT;
937 default:
938 pr_warn_once("%s: unhandled return value: %ld\n",
939 __func__, pages);
940 fallthrough;
941 case -ENOMEM:
942 return -ENOMEM;
943 }
944 }
945 start += pages * PAGE_SIZE;
946 }
947 return 0;
948 }
949
950 /*
951 * Application wants to free up the pages and associated backing store.
952 * This is effectively punching a hole into the middle of a file.
953 */
954 static long madvise_remove(struct vm_area_struct *vma,
955 struct vm_area_struct **prev,
956 unsigned long start, unsigned long end)
957 {
958 loff_t offset;
959 int error;
960 struct file *f;
961 struct mm_struct *mm = vma->vm_mm;
962
963 *prev = NULL; /* tell sys_madvise we drop mmap_lock */
964
965 if (vma->vm_flags & VM_LOCKED)
966 return -EINVAL;
967
968 f = vma->vm_file;
969
970 if (!f || !f->f_mapping || !f->f_mapping->host) {
971 return -EINVAL;
972 }
973
974 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
975 return -EACCES;
976
977 offset = (loff_t)(start - vma->vm_start)
978 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
979
980 /*
981 * Filesystem's fallocate may need to take i_rwsem. We need to
982 * explicitly grab a reference because the vma (and hence the
983 * vma's reference to the file) can go away as soon as we drop
984 * mmap_lock.
985 */
986 get_file(f);
987 if (userfaultfd_remove(vma, start, end)) {
988 /* mmap_lock was not released by userfaultfd_remove() */
989 mmap_read_unlock(mm);
990 }
991 error = vfs_fallocate(f,
992 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
993 offset, end - start);
994 fput(f);
995 mmap_read_lock(mm);
996 return error;
997 }
998
999 /*
1000 * Apply a madvise behavior to a region of a vma. madvise_update_vma
1001 * will handle splitting a vm area into separate areas, each area with its own
1002 * behavior.
1003 */
1004 static int madvise_vma_behavior(struct vm_area_struct *vma,
1005 struct vm_area_struct **prev,
1006 unsigned long start, unsigned long end,
1007 unsigned long behavior)
1008 {
1009 int error;
1010 struct anon_vma_name *anon_name;
1011 unsigned long new_flags = vma->vm_flags;
1012
1013 switch (behavior) {
1014 case MADV_REMOVE:
1015 return madvise_remove(vma, prev, start, end);
1016 case MADV_WILLNEED:
1017 return madvise_willneed(vma, prev, start, end);
1018 case MADV_COLD:
1019 return madvise_cold(vma, prev, start, end);
1020 case MADV_PAGEOUT:
1021 return madvise_pageout(vma, prev, start, end);
1022 case MADV_FREE:
1023 case MADV_DONTNEED:
1024 case MADV_DONTNEED_LOCKED:
1025 return madvise_dontneed_free(vma, prev, start, end, behavior);
1026 case MADV_POPULATE_READ:
1027 case MADV_POPULATE_WRITE:
1028 return madvise_populate(vma, prev, start, end, behavior);
1029 case MADV_NORMAL:
1030 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
1031 break;
1032 case MADV_SEQUENTIAL:
1033 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
1034 break;
1035 case MADV_RANDOM:
1036 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
1037 break;
1038 case MADV_DONTFORK:
1039 new_flags |= VM_DONTCOPY;
1040 break;
1041 case MADV_DOFORK:
1042 if (vma->vm_flags & VM_IO)
1043 return -EINVAL;
1044 new_flags &= ~VM_DONTCOPY;
1045 break;
1046 case MADV_WIPEONFORK:
1047 /* MADV_WIPEONFORK is only supported on anonymous memory. */
1048 if (vma->vm_file || vma->vm_flags & VM_SHARED)
1049 return -EINVAL;
1050 new_flags |= VM_WIPEONFORK;
1051 break;
1052 case MADV_KEEPONFORK:
1053 new_flags &= ~VM_WIPEONFORK;
1054 break;
1055 case MADV_DONTDUMP:
1056 new_flags |= VM_DONTDUMP;
1057 break;
1058 case MADV_DODUMP:
1059 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
1060 return -EINVAL;
1061 new_flags &= ~VM_DONTDUMP;
1062 break;
1063 case MADV_MERGEABLE:
1064 case MADV_UNMERGEABLE:
1065 error = ksm_madvise(vma, start, end, behavior, &new_flags);
1066 if (error)
1067 goto out;
1068 break;
1069 case MADV_HUGEPAGE:
1070 case MADV_NOHUGEPAGE:
1071 error = hugepage_madvise(vma, &new_flags, behavior);
1072 if (error)
1073 goto out;
1074 break;
1075 case MADV_COLLAPSE:
1076 return madvise_collapse(vma, prev, start, end);
1077 }
1078
1079 anon_name = anon_vma_name(vma);
1080 anon_vma_name_get(anon_name);
1081 error = madvise_update_vma(vma, prev, start, end, new_flags,
1082 anon_name);
1083 anon_vma_name_put(anon_name);
1084
1085 out:
1086 /*
1087 * madvise() returns EAGAIN if kernel resources, such as
1088 * slab, are temporarily unavailable.
1089 */
1090 if (error == -ENOMEM)
1091 error = -EAGAIN;
1092 return error;
1093 }
1094
1095 #ifdef CONFIG_MEMORY_FAILURE
1096 /*
1097 * Error injection support for memory error handling.
1098 */
1099 static int madvise_inject_error(int behavior,
1100 unsigned long start, unsigned long end)
1101 {
1102 unsigned long size;
1103
1104 if (!capable(CAP_SYS_ADMIN))
1105 return -EPERM;
1106
1107
1108 for (; start < end; start += size) {
1109 unsigned long pfn;
1110 struct page *page;
1111 int ret;
1112
1113 ret = get_user_pages_fast(start, 1, 0, &page);
1114 if (ret != 1)
1115 return ret;
1116 pfn = page_to_pfn(page);
1117
1118 /*
1119 * When soft offlining hugepages, after migrating the page
1120 * we dissolve it, therefore on the next loop iteration "page" will
1121 * no longer be a compound page.
1122 */
1123 size = page_size(compound_head(page));
1124
1125 if (behavior == MADV_SOFT_OFFLINE) {
1126 pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
1127 pfn, start);
1128 ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
1129 } else {
1130 pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
1131 pfn, start);
1132 ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
1133 if (ret == -EOPNOTSUPP)
1134 ret = 0;
1135 }
1136
1137 if (ret)
1138 return ret;
1139 }
1140
1141 return 0;
1142 }
1143 #endif
1144
1145 static bool
1146 madvise_behavior_valid(int behavior)
1147 {
1148 switch (behavior) {
1149 case MADV_DOFORK:
1150 case MADV_DONTFORK:
1151 case MADV_NORMAL:
1152 case MADV_SEQUENTIAL:
1153 case MADV_RANDOM:
1154 case MADV_REMOVE:
1155 case MADV_WILLNEED:
1156 case MADV_DONTNEED:
1157 case MADV_DONTNEED_LOCKED:
1158 case MADV_FREE:
1159 case MADV_COLD:
1160 case MADV_PAGEOUT:
1161 case MADV_POPULATE_READ:
1162 case MADV_POPULATE_WRITE:
1163 #ifdef CONFIG_KSM
1164 case MADV_MERGEABLE:
1165 case MADV_UNMERGEABLE:
1166 #endif
1167 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1168 case MADV_HUGEPAGE:
1169 case MADV_NOHUGEPAGE:
1170 case MADV_COLLAPSE:
1171 #endif
1172 case MADV_DONTDUMP:
1173 case MADV_DODUMP:
1174 case MADV_WIPEONFORK:
1175 case MADV_KEEPONFORK:
1176 #ifdef CONFIG_MEMORY_FAILURE
1177 case MADV_SOFT_OFFLINE:
1178 case MADV_HWPOISON:
1179 #endif
1180 return true;
1181
1182 default:
1183 return false;
1184 }
1185 }
1186
1187 static bool process_madvise_behavior_valid(int behavior)
1188 {
1189 switch (behavior) {
1190 case MADV_COLD:
1191 case MADV_PAGEOUT:
1192 case MADV_WILLNEED:
1193 case MADV_COLLAPSE:
1194 return true;
1195 default:
1196 return false;
1197 }
1198 }
1199
1200 /*
1201 * Walk the vmas in range [start,end), and call the visit function on each one.
1202 * The visit function will get start and end parameters that cover the overlap
1203 * between the current vma and the original range. Any unmapped regions in the
1204 * original range will result in this function returning -ENOMEM while still
1205 * calling the visit function on all of the existing vmas in the range.
1206 * Must be called with the mmap_lock held for reading or writing.
1207 */
1208 static
1209 int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
1210 unsigned long end, unsigned long arg,
1211 int (*visit)(struct vm_area_struct *vma,
1212 struct vm_area_struct **prev, unsigned long start,
1213 unsigned long end, unsigned long arg))
1214 {
1215 struct vm_area_struct *vma;
1216 struct vm_area_struct *prev;
1217 unsigned long tmp;
1218 int unmapped_error = 0;
1219
1220 /*
1221 * If the interval [start,end) covers some unmapped address
1222 * ranges, just ignore them, but return -ENOMEM at the end.
1223 * This differs from the handling in mlock etc.
1224 */
1225 vma = find_vma_prev(mm, start, &prev);
1226 if (vma && start > vma->vm_start)
1227 prev = vma;
1228
1229 for (;;) {
1230 int error;
1231
1232 /* Still start < end. */
1233 if (!vma)
1234 return -ENOMEM;
1235
1236 /* Here start < (end|vma->vm_end). */
1237 if (start < vma->vm_start) {
1238 unmapped_error = -ENOMEM;
1239 start = vma->vm_start;
1240 if (start >= end)
1241 break;
1242 }
1243
1244 /* Here vma->vm_start <= start < (end|vma->vm_end) */
1245 tmp = vma->vm_end;
1246 if (end < tmp)
1247 tmp = end;
1248
1249 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1250 error = visit(vma, &prev, start, tmp, arg);
1251 if (error)
1252 return error;
1253 start = tmp;
1254 if (prev && start < prev->vm_end)
1255 start = prev->vm_end;
1256 if (start >= end)
1257 break;
1258 if (prev)
1259 vma = find_vma(mm, prev->vm_end);
1260 else /* madvise_remove dropped mmap_lock */
1261 vma = find_vma(mm, start);
1262 }
1263
1264 return unmapped_error;
1265 }
1266
1267 #ifdef CONFIG_ANON_VMA_NAME
1268 static int madvise_vma_anon_name(struct vm_area_struct *vma,
1269 struct vm_area_struct **prev,
1270 unsigned long start, unsigned long end,
1271 unsigned long anon_name)
1272 {
1273 int error;
1274
1275 /* Only anonymous mappings can be named */
1276 if (vma->vm_file)
1277 return -EBADF;
1278
1279 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
1280 (struct anon_vma_name *)anon_name);
1281
1282 /*
1283 * madvise() returns EAGAIN if kernel resources, such as
1284 * slab, are temporarily unavailable.
1285 */
1286 if (error == -ENOMEM)
1287 error = -EAGAIN;
1288 return error;
1289 }
1290
1291 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
1292 unsigned long len_in, struct anon_vma_name *anon_name)
1293 {
1294 unsigned long end;
1295 unsigned long len;
1296
1297 if (start & ~PAGE_MASK)
1298 return -EINVAL;
1299 len = (len_in + ~PAGE_MASK) & PAGE_MASK;
1300
1301 /* Check to see whether len was rounded up from small -ve to zero */
1302 if (len_in && !len)
1303 return -EINVAL;
1304
1305 end = start + len;
1306 if (end < start)
1307 return -EINVAL;
1308
1309 if (end == start)
1310 return 0;
1311
1312 return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
1313 madvise_vma_anon_name);
1314 }
1315 #endif /* CONFIG_ANON_VMA_NAME */
1316 /*
1317 * The madvise(2) system call.
1318 *
1319 * Applications can use madvise() to advise the kernel how it should
1320 * handle paging I/O in this VM area. The idea is to help the kernel
1321 * use appropriate read-ahead and caching techniques. The information
1322 * provided is advisory only, and can be safely disregarded by the
1323 * kernel without affecting the correct operation of the application.
1324 *
1325 * behavior values:
1326 * MADV_NORMAL - the default behavior is to read clusters. This
1327 * results in some read-ahead and read-behind.
1328 * MADV_RANDOM - the system should read the minimum amount of data
1329 * on any access, since it is unlikely that the appli-
1330 * cation will need more than what it asks for.
1331 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
1332 * once, so they can be aggressively read ahead, and
1333 * can be freed soon after they are accessed.
1334 * MADV_WILLNEED - the application is notifying the system to read
1335 * some pages ahead.
1336 * MADV_DONTNEED - the application is finished with the given range,
1337 * so the kernel can free resources associated with it.
1338 * MADV_FREE - the application marks pages in the given range as lazy free,
1339 * where actual purges are postponed until memory pressure happens.
1340 * MADV_REMOVE - the application wants to free up the given range of
1341 * pages and associated backing store.
1342 * MADV_DONTFORK - omit this area from child's address space when forking:
1343 * typically, to avoid COWing pages pinned by get_user_pages().
1344 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1345 * MADV_WIPEONFORK - present the child process with zero-filled memory in this
1346 * range after a fork.
1347 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1348 * MADV_HWPOISON - trigger memory error handler as if the given memory range
1349 * were corrupted by unrecoverable hardware memory failure.
1350 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1351 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
1352 * this area with pages of identical content from other such areas.
1353 * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
1354 * MADV_HUGEPAGE - the application wants to back the given range by transparent
1355 * huge pages in the future. Existing pages might be coalesced and
1356 * new pages might be allocated as THP.
1357 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by
1358 * transparent huge pages so the existing pages will not be
1359 * coalesced into THP and new pages will not be allocated as THP.
1360 * MADV_COLLAPSE - synchronously coalesce pages into new THP.
1361 * MADV_DONTDUMP - the application wants to prevent pages in the given range
1362 * from being included in its core dump.
1363 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1364 * MADV_COLD - the application is not expected to use this memory soon,
1365 * deactivate pages in this range so that they can be reclaimed
1366 * easily if memory pressure happens.
1367 * MADV_PAGEOUT - the application is not expected to use this memory soon,
1368 * page out the pages in this range immediately.
1369 * MADV_POPULATE_READ - populate (prefault) page tables readable by
1370 * triggering read faults if required
1371 * MADV_POPULATE_WRITE - populate (prefault) page tables writable by
1372 * triggering write faults if required
1373 *
1374 * return values:
1375 * zero - success
1376 * -EINVAL - start + len < 0, start is not page-aligned,
1377 * "behavior" is not a valid value, or application
1378 * is attempting to release locked or shared pages,
1379 * or the specified address range includes file, Huge TLB,
1380 * MAP_SHARED or VM_PFNMAP range.
1381 * -ENOMEM - addresses in the specified range are not currently
1382 * mapped, or are outside the AS of the process.
1383 * -EIO - an I/O error occurred while paging in data.
1384 * -EBADF - map exists, but area maps something that isn't a file.
1385 * -EAGAIN - a kernel resource was temporarily unavailable.
1386 */
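/*
 * Illustrative userspace call (a minimal sketch, not part of this file's
 * kernel code): the address must be page aligned and the length is
 * rounded up to a page boundary, matching the checks in do_madvise().
 *
 *	#include <sys/mman.h>
 *
 *	if (madvise(addr, length, MADV_DONTNEED) == -1)
 *		perror("madvise");
 */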
1387 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
1388 {
1389 unsigned long end;
1390 int error;
1391 int write;
1392 size_t len;
1393 struct blk_plug plug;
1394
1395 start = untagged_addr(start);
1396
1397 if (!madvise_behavior_valid(behavior))
1398 return -EINVAL;
1399
1400 if (!PAGE_ALIGNED(start))
1401 return -EINVAL;
1402 len = PAGE_ALIGN(len_in);
1403
1404 /* Check to see whether len was rounded up from small -ve to zero */
1405 if (len_in && !len)
1406 return -EINVAL;
1407
1408 end = start + len;
1409 if (end < start)
1410 return -EINVAL;
1411
1412 if (end == start)
1413 return 0;
1414
1415 #ifdef CONFIG_MEMORY_FAILURE
1416 if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
1417 return madvise_inject_error(behavior, start, start + len_in);
1418 #endif
1419
1420 write = madvise_need_mmap_write(behavior);
1421 if (write) {
1422 if (mmap_write_lock_killable(mm))
1423 return -EINTR;
1424 } else {
1425 mmap_read_lock(mm);
1426 }
1427
1428 blk_start_plug(&plug);
1429 error = madvise_walk_vmas(mm, start, end, behavior,
1430 madvise_vma_behavior);
1431 blk_finish_plug(&plug);
1432 if (write)
1433 mmap_write_unlock(mm);
1434 else
1435 mmap_read_unlock(mm);
1436
1437 return error;
1438 }
1439
1440 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
1441 {
1442 return do_madvise(current->mm, start, len_in, behavior);
1443 }
1444
1445 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
1446 size_t, vlen, int, behavior, unsigned int, flags)
1447 {
1448 ssize_t ret;
1449 struct iovec iovstack[UIO_FASTIOV], iovec;
1450 struct iovec *iov = iovstack;
1451 struct iov_iter iter;
1452 struct task_struct *task;
1453 struct mm_struct *mm;
1454 size_t total_len;
1455 unsigned int f_flags;
1456
1457 if (flags != 0) {
1458 ret = -EINVAL;
1459 goto out;
1460 }
1461
1462 ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
1463 if (ret < 0)
1464 goto out;
1465
1466 task = pidfd_get_task(pidfd, &f_flags);
1467 if (IS_ERR(task)) {
1468 ret = PTR_ERR(task);
1469 goto free_iov;
1470 }
1471
1472 if (!process_madvise_behavior_valid(behavior)) {
1473 ret = -EINVAL;
1474 goto release_task;
1475 }
1476
1477 /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
1478 mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
1479 if (IS_ERR_OR_NULL(mm)) {
1480 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
1481 goto release_task;
1482 }
1483
1484 /*
1485 * Require CAP_SYS_NICE for influencing process performance. Note that
1486 * only non-destructive hints are currently supported.
1487 */
1488 if (!capable(CAP_SYS_NICE)) {
1489 ret = -EPERM;
1490 goto release_mm;
1491 }
1492
1493 total_len = iov_iter_count(&iter);
1494
1495 while (iov_iter_count(&iter)) {
1496 iovec = iov_iter_iovec(&iter);
1497 ret = do_madvise(mm, (unsigned long)iovec.iov_base,
1498 iovec.iov_len, behavior);
1499 if (ret < 0)
1500 break;
1501 iov_iter_advance(&iter, iovec.iov_len);
1502 }
1503
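/*
 * Report how many bytes were successfully advised; surface the error
 * only if no range was processed at all.
 */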
1504 ret = (total_len - iov_iter_count(&iter)) ? : ret;
1505
1506 release_mm:
1507 mmput(mm);
1508 release_task:
1509 put_task_struct(task);
1510 free_iov:
1511 kfree(iov);
1512 out:
1513 return ret;
1514 }
1515