Lines Matching +full:wp +full:- +full:content

1 // SPDX-License-Identifier: GPL-2.0
105 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
113 /* pte-mapped THP in this mm */
119 * struct khugepaged_scan - cursor for scanning
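
The two structures documented here drive the scan: one holds per-mm state, including the pte-mapped THP addresses khugepaged should revisit, the other is the resume cursor. A simplified, hedged userspace mock of that bookkeeping (the kernel versions embed struct mm_slot linkage and a real mm_struct pointer, elided here; MAX_PTE_MAPPED_THP below is an illustrative value, not quoted from the source):

#include <stdio.h>

#define MAX_PTE_MAPPED_THP 8	/* illustrative bound only */

/* Simplified per-mm bookkeeping; hash/list linkage and the mm pointer elided. */
struct mock_mm_slot {
	int nr_pte_mapped_thp;				/* valid entries below */
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP]; /* pte-mapped THPs to re-collapse */
};

/* Simplified scan cursor: which mm is being scanned and where to resume. */
struct mock_scan {
	struct mock_mm_slot *mm_slot;
	unsigned long address;
};

int main(void)
{
	struct mock_mm_slot slot = { 0 };
	struct mock_scan scan = { &slot, 0x7f0000000000UL };

	/* Record one pte-mapped THP address, in the spirit of khugepaged_add_pte_mapped_thp(). */
	if (slot.nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)
		slot.pte_mapped_thp[slot.nr_pte_mapped_thp++] = 0x7f0000200000UL;

	printf("resume at %#lx, %d deferred THP(s)\n",
	       scan.address, scan.mm_slot->nr_pte_mapped_thp);
	return 0;
}
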
153 return -EINVAL; in scan_sleep_millisecs_store()
180 return -EINVAL; in alloc_sleep_millisecs_store()
206 return -EINVAL; in pages_to_scan_store()
271 if (err || max_ptes_none > HPAGE_PMD_NR - 1) in max_ptes_none_store()
272 return -EINVAL; in max_ptes_none_store()
296 if (err || max_ptes_swap > HPAGE_PMD_NR - 1) in max_ptes_swap_store()
297 return -EINVAL; in max_ptes_swap_store()
322 if (err || max_ptes_shared > HPAGE_PMD_NR - 1) in max_ptes_shared_store()
323 return -EINVAL; in max_ptes_shared_store()
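
These store handlers back the khugepaged tunables exposed through sysfs; each rejects values above HPAGE_PMD_NR - 1 (511 with 4 KiB base pages and 2 MiB PMD huge pages) with -EINVAL. A minimal userspace sketch for inspecting and lowering one of them, assuming the usual /sys/kernel/mm/transparent_hugepage/khugepaged/ location and write permission (normally root):

/* Read and update a khugepaged tunable via sysfs (writing needs root). */
#include <stdio.h>

static const char *path =
	"/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none";

int main(void)
{
	long cur;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%ld", &cur) != 1) {
		perror("read max_ptes_none");
		return 1;
	}
	fclose(f);
	printf("max_ptes_none is %ld\n", cur);

	/* Allow collapse only when at most 64 of the 512 PTEs are empty. */
	f = fopen(path, "w");
	if (!f || fprintf(f, "64\n") < 0) {
		perror("write max_ptes_none");
		return 1;
	}
	fclose(f);
	return 0;
}
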
363 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
396 return -ENOMEM; in khugepaged_init()
399 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; in khugepaged_init()
413 return atomic_read(&mm->mm_users) == 0; in hpage_collapse_test_exit()
426 slot = &mm_slot->slot; in __khugepaged_enter()
430 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { in __khugepaged_enter()
442 list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head); in __khugepaged_enter()
453 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && in khugepaged_enter_vma()
456 __khugepaged_enter(vma->vm_mm); in khugepaged_enter_vma()
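
khugepaged_enter_vma()/__khugepaged_enter() are the registration path: the first THP-eligible VMA in an mm sets MMF_VM_HUGEPAGE and puts the mm on khugepaged's scan list. A hedged userspace sketch that triggers this by opting a PMD-aligned anonymous region in with MADV_HUGEPAGE:

/* Create a 2 MiB-aligned anonymous mapping and opt it in to THP so the
 * owning mm gets registered with khugepaged. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL << 20)	/* assumes 2 MiB PMD-sized huge pages */

int main(void)
{
	/* Over-allocate so a PMD-aligned window definitely fits. */
	char *raw = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *area;

	if (raw == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	area = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));

	if (madvise(area, HPAGE_SIZE, MADV_HUGEPAGE)) {
		perror("madvise(MADV_HUGEPAGE)");
		return 1;
	}

	/* Touch every base page; khugepaged may collapse the range later. */
	memset(area, 0x5a, HPAGE_SIZE);
	pause();	/* keep the mapping alive so khugepaged can get to it */
	return 0;
}
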
470 hash_del(&slot->hash); in __khugepaged_exit()
471 list_del(&slot->mm_node); in __khugepaged_exit()
477 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in __khugepaged_exit()
497 -compound_nr(page)); in release_pte_page()
507 while (--_pte >= pte) { in release_pte_pages()
517 list_del(&page->lru); in release_pte_pages()
551 (!cc->is_khugepaged || in __collapse_huge_page_isolate()
574 if (cc->is_khugepaged && in __collapse_huge_page_isolate()
640 list_add_tail(&page->lru, compound_pagelist); in __collapse_huge_page_isolate()
646 if (cc->is_khugepaged && in __collapse_huge_page_isolate()
648 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, in __collapse_huge_page_isolate()
658 } else if (unlikely(cc->is_khugepaged && !referenced)) { in __collapse_huge_page_isolate()
687 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
693 ptep_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
703 * be disabled to update the per-cpu stats in __collapse_huge_page_copy()
707 ptep_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
715 list_del(&src_page->lru); in __collapse_huge_page_copy()
718 -compound_nr(src_page)); in __collapse_huge_page_copy()
751 if (cc->node_load[nid]) in hpage_collapse_scan_abort()
755 if (!cc->node_load[i]) in hpage_collapse_scan_abort()
780 if (cc->node_load[nid] > max_value) { in hpage_collapse_find_target_node()
781 max_value = cc->node_load[nid]; in hpage_collapse_find_target_node()
786 if (max_value == cc->node_load[nid]) in hpage_collapse_find_target_node()
787 node_set(nid, cc->alloc_nmask); in hpage_collapse_find_target_node()
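
hpage_collapse_find_target_node() picks the NUMA node that contributed the most base pages during the scan and allows allocation from every node tied for that maximum (via cc->alloc_nmask). A simplified, self-contained re-implementation of that selection loop, for illustration only:

/* Userspace model: choose the node with the largest node_load; nodes tied
 * for the maximum are all allowed as allocation fallbacks. */
#include <stdio.h>

#define NR_NODES 4	/* illustrative; the kernel iterates online nodes */

int main(void)
{
	int node_load[NR_NODES] = { 120, 300, 300, 5 };
	unsigned int alloc_mask = 0;
	int target = 0, max_value = 0, nid;

	for (nid = 0; nid < NR_NODES; nid++) {
		if (node_load[nid] > max_value) {
			max_value = node_load[nid];
			target = nid;
		}
	}
	for (nid = 0; nid < NR_NODES; nid++) {
		if (node_load[nid] == max_value)
			alloc_mask |= 1U << nid;	/* stands in for node_set(nid, cc->alloc_nmask) */
	}

	printf("target node %d, alloc mask 0x%x\n", target, alloc_mask);
	return 0;
}
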
835 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, in hugepage_vma_revalidate()
836 cc->is_khugepaged)) in hugepage_vma_revalidate()
845 if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap))) in hugepage_vma_revalidate()
953 gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() : in alloc_charge_hpage()
957 if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask)) in alloc_charge_hpage()
1033 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
1072 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1080 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1094 _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); in collapse_huge_page()
1141 memset(cc->node_load, 0, sizeof(cc->node_load)); in hpage_collapse_scan_pmd()
1142 nodes_clear(cc->alloc_nmask); in hpage_collapse_scan_pmd()
1149 if (!cc->is_khugepaged || in hpage_collapse_scan_pmd()
1152 * Always be strict with uffd-wp in hpage_collapse_scan_pmd()
1170 (!cc->is_khugepaged || in hpage_collapse_scan_pmd()
1203 if (cc->is_khugepaged && in hpage_collapse_scan_pmd()
1215 * information to cc->node_load[]. in hpage_collapse_scan_pmd()
1224 cc->node_load[node]++; in hpage_collapse_scan_pmd()
1264 if (cc->is_khugepaged && in hpage_collapse_scan_pmd()
1266 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, in hpage_collapse_scan_pmd()
1272 } else if (cc->is_khugepaged && in hpage_collapse_scan_pmd()
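
hpage_collapse_scan_pmd() only approves a range if, among other checks, the count of empty/zero PTEs stays within max_ptes_none and (for khugepaged) enough pages look recently referenced. One hedged way to observe the outcome from userspace is to populate only part of a PMD-sized range and watch AnonHugePages in /proc/self/smaps_rollup: assuming THP is enabled in "always" or "madvise" mode, collapse is expected only when 512 - TOUCH <= max_ptes_none (so lowering max_ptes_none as in the earlier sysfs sketch changes the result), and only after khugepaged's next scan pass:

/* Touch only TOUCH of the 512 base pages in one PMD-sized range and poll
 * AnonHugePages to see whether khugepaged collapses it. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL << 20)
#define BASE_PAGE 4096UL
#define TOUCH 400		/* pages actually written out of 512 */

static long anon_huge_kb(void)
{
	char line[256];
	long kb = -1;
	FILE *f = fopen("/proc/self/smaps_rollup", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
			break;
	fclose(f);
	return kb;
}

int main(void)
{
	char *raw = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *area;
	int i;

	if (raw == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	area = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));
	madvise(area, HPAGE_SIZE, MADV_HUGEPAGE);

	for (i = 0; i < TOUCH; i++)
		area[i * BASE_PAGE] = 1;

	for (i = 0; i < 30; i++) {	/* poll for ~30 seconds */
		printf("AnonHugePages: %ld kB\n", anon_huge_kb());
		sleep(1);
	}
	return 0;
}
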
1295 struct mm_slot *slot = &mm_slot->slot; in collect_mm_slot()
1296 struct mm_struct *mm = slot->mm; in collect_mm_slot()
1302 hash_del(&slot->hash); in collect_mm_slot()
1303 list_del(&slot->mm_node); in collect_mm_slot()
1308 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in collect_mm_slot()
1319 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1324 * emptying the A's ->pte_mapped_thp[] array.
1328 * ->pte_mapped_thp[] array.
1330 * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
1331 * (for X) into mm_struct A's ->pte_mapped_thp[] array.
1339 * "multiple-add" is thought to be more expensive than just handling it, should
1354 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) { in khugepaged_add_pte_mapped_thp()
1355 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr; in khugepaged_add_pte_mapped_thp()
1374 mmap_assert_write_locked(vma->vm_mm); in set_huge_pmd()
1388 * - modifying terminal entries (ones that point to a data page, not to another
1390 * - installing *new* non-terminal entries
1405 if (vma->vm_file) in collapse_and_free_pmd()
1406 lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem); in collapse_and_free_pmd()
1411 if (vma->anon_vma) in collapse_and_free_pmd()
1412 lockdep_assert_held_write(&vma->anon_vma->root->rwsem); in collapse_and_free_pmd()
1426 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1435 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1451 /* Fast check before locking page if already PMD-mapped */ in collapse_pte_mapped_thp()
1456 if (!vma || !vma->vm_file || in collapse_pte_mapped_thp()
1462 * in the page cache with a single hugepage. If a mm were to fault-in in collapse_pte_mapped_thp()
1467 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) in collapse_pte_mapped_thp()
1475 if (vma->anon_vma) in collapse_pte_mapped_thp()
1478 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ in collapse_pte_mapped_thp()
1482 hpage = find_lock_page(vma->vm_file->f_mapping, in collapse_pte_mapped_thp()
1512 * We need to lock the mapping so that from here on, only GUP-fast and in collapse_pte_mapped_thp()
1517 i_mmap_lock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1523 * tables while all the high-level locks are held in write mode. in collapse_pte_mapped_thp()
1573 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); in collapse_pte_mapped_thp()
1579 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1594 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1600 struct mm_slot *slot = &mm_slot->slot; in khugepaged_collapse_pte_mapped_thps()
1601 struct mm_struct *mm = slot->mm; in khugepaged_collapse_pte_mapped_thps()
1604 if (likely(mm_slot->nr_pte_mapped_thp == 0)) in khugepaged_collapse_pte_mapped_thps()
1613 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++) in khugepaged_collapse_pte_mapped_thps()
1614 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false); in khugepaged_collapse_pte_mapped_thps()
1617 mm_slot->nr_pte_mapped_thp = 0; in khugepaged_collapse_pte_mapped_thps()
1630 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in retract_page_tables()
1638 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that in retract_page_tables()
1640 * mmap_write_lock(mm) as PMD-mapping is likely to be split in retract_page_tables()
1643 * Note that vma->anon_vma check is racy: it can be set up after in retract_page_tables()
1654 if (vma->anon_vma) { in retract_page_tables()
1658 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in retract_page_tables()
1660 vma->vm_end < addr + HPAGE_PMD_SIZE) { in retract_page_tables()
1664 mm = vma->vm_mm; in retract_page_tables()
1677 * mappings - let khugepaged take care of them later. in retract_page_tables()
1680 if ((cc->is_khugepaged || is_target) && in retract_page_tables()
1683 * When a vma is registered with uffd-wp, we can't in retract_page_tables()
1687 * it'll always be mapped in small page size for uffd-wp in retract_page_tables()
1699 if (!cc->is_khugepaged && is_target) in retract_page_tables()
1725 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1734 * - allocate and lock a new huge page;
1735 * - scan page cache replacing old pages with the new one
1739 * - if replacing succeeds:
1743 * - if replacing failed;
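
collapse_file() is the file/shmem counterpart: allocate and lock a huge page, swap it into the page cache in place of the small pages, then copy the content over and free the old pages, or roll everything back if replacement fails. A hedged userspace sketch that exercises this path through MADV_COLLAPSE on a shmem (memfd) mapping; whether the call succeeds depends on kernel version and configuration, so the errno is reported rather than assumed:

/* Attempt a synchronous collapse of a 2 MiB shmem (memfd) mapping.
 * MADV_COLLAPSE needs Linux 6.1+; memfd_create() needs glibc 2.27+. */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* uapi value from asm-generic/mman-common.h */
#endif

#define HPAGE_SIZE (2UL << 20)

int main(void)
{
	int fd = memfd_create("thp-demo", 0);
	char *raw, *aligned, *map;

	if (fd < 0 || ftruncate(fd, HPAGE_SIZE)) {
		perror("memfd_create/ftruncate");
		return 1;
	}
	/* Reserve an aligned window, then map the memfd there with MAP_FIXED so
	 * both the virtual address and file offset 0 are PMD-aligned. */
	raw = mmap(NULL, 2 * HPAGE_SIZE, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED) {
		perror("mmap reserve");
		return 1;
	}
	aligned = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));
	map = mmap(aligned, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_FIXED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap memfd");
		return 1;
	}
	memset(map, 0x5a, HPAGE_SIZE);	/* populate every small page first */

	if (madvise(map, HPAGE_SIZE, MADV_COLLAPSE))
		perror("madvise(MADV_COLLAPSE)");	/* e.g. EINVAL/EAGAIN/ENOMEM */
	else
		puts("collapsed to a PMD-mapped huge page");
	return 0;
}
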
1752 struct address_space *mapping = file->f_mapping; in collapse_file()
1756 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); in collapse_file()
1762 VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); in collapse_file()
1770 * almost certainly a no-op because most of the pages must be present in collapse_file()
1787 hpage->index = start; in collapse_file()
1788 hpage->mapping = mapping; in collapse_file()
1791 * At this point the hpage is locked and not up-to-date. in collapse_file()
1805 * hole-punched, and is now completely in collapse_file()
1809 if (!xas_next_entry(&xas, end - 1)) { in collapse_file()
1815 if (!shmem_charge(mapping->host, 1)) { in collapse_file()
1829 if (shmem_get_folio(mapping->host, index, in collapse_file()
1845 page_cache_sync_readahead(mapping, &file->f_ra, in collapse_file()
1847 end - index); in collapse_file()
1857 * khugepaged only works on read-only fd, in collapse_file()
1866 * This is a one-off situation. We are not in collapse_file()
1899 * If file was truncated then extended, or hole-punched, before in collapse_file()
1907 head->index == start in collapse_file()
1908 /* Maybe PMD-mapped */ in collapse_file()
1922 * khugepaged only works on read-only fd, so this in collapse_file()
1953 * - we hold a pin on it; in collapse_file()
1954 * - one reference from page cache; in collapse_file()
1955 * - one from isolate_lru_page; in collapse_file()
1968 list_add_tail(&page->lru, &pagelist); in collapse_file()
1992 if (inode_is_open_for_write(mapping->host)) { in collapse_file()
1994 __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr); in collapse_file()
2002 /* nr_none is always 0 for non-shmem. */ in collapse_file()
2006 /* Join all the small entries into a single multi-index entry */ in collapse_file()
2025 * need to copy the content and free the old pages. in collapse_file()
2029 while (index < page->index) { in collapse_file()
2033 copy_highpage(hpage + (page->index % HPAGE_PMD_NR), in collapse_file()
2035 list_del(&page->lru); in collapse_file()
2036 page->mapping = NULL; in collapse_file()
2050 page_ref_add(hpage, HPAGE_PMD_NR - 1); in collapse_file()
2056 * Remove pte page tables, so we can re-fault the page as huge. in collapse_file()
2068 mapping->nrpages -= nr_none; in collapse_file()
2069 shmem_uncharge(mapping->host, nr_none); in collapse_file()
2073 xas_for_each(&xas, page, end - 1) { in collapse_file()
2076 if (!page || xas.xa_index < page->index) { in collapse_file()
2079 nr_none--; in collapse_file()
2085 VM_BUG_ON_PAGE(page->index != xas.xa_index, page); in collapse_file()
2088 list_del(&page->lru); in collapse_file()
2100 hpage->mapping = NULL; in collapse_file()
2120 struct address_space *mapping = file->f_mapping; in hpage_collapse_scan_file()
2121 XA_STATE(xas, &mapping->i_pages, start); in hpage_collapse_scan_file()
2128 memset(cc->node_load, 0, sizeof(cc->node_load)); in hpage_collapse_scan_file()
2129 nodes_clear(cc->alloc_nmask); in hpage_collapse_scan_file()
2131 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { in hpage_collapse_scan_file()
2137 if (cc->is_khugepaged && in hpage_collapse_scan_file()
2154 head->index == start in hpage_collapse_scan_file()
2155 /* Maybe PMD-mapped */ in hpage_collapse_scan_file()
2172 cc->node_load[node]++; in hpage_collapse_scan_file()
2201 if (cc->is_khugepaged && in hpage_collapse_scan_file()
2202 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { in hpage_collapse_scan_file()
2250 slot = &mm_slot->slot; in khugepaged_scan_mm_slot()
2261 mm = slot->mm; in khugepaged_scan_mm_slot()
2283 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { in khugepaged_scan_mm_slot()
2288 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE); in khugepaged_scan_mm_slot()
2289 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE); in khugepaged_scan_mm_slot()
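
In other words, khugepaged only scans the portion of a VMA that still contains fully aligned PMD-sized ranges after rounding: for example, a VMA spanning 0x7f1200001000..0x7f1200400000 yields hstart = 0x7f1200200000 and hend = 0x7f1200400000, i.e. exactly one 2 MiB candidate range, while a VMA whose rounded range collapses to nothing is skipped entirely.
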
2306 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in khugepaged_scan_mm_slot()
2307 struct file *file = get_file(vma->vm_file); in khugepaged_scan_mm_slot()
2375 if (slot->mm_node.next != &khugepaged_scan.mm_head) { in khugepaged_scan_mm_slot()
2376 slot = list_entry(slot->mm_node.next, in khugepaged_scan_mm_slot()
2424 progress += khugepaged_scan_mm_slot(pages - progress, in khugepaged_do_scan()
2530 recommended_min <<= (PAGE_SHIFT-10); in set_recommended_min_free_kbytes()
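
The << (PAGE_SHIFT - 10) here just converts a page count into kibibytes for min_free_kbytes: with 4 KiB pages (PAGE_SHIFT = 12) that is a shift by 2, so 1024 recommended pages become 4096 kB.
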
2589 return -ENOMEM; in madvise_collapse_errno()
2591 return -EBUSY; in madvise_collapse_errno()
2592 /* Resource temporarily unavailable - trying again might succeed */ in madvise_collapse_errno()
2596 return -EAGAIN; in madvise_collapse_errno()
2603 return -EINVAL; in madvise_collapse_errno()
2611 struct mm_struct *mm = vma->vm_mm; in madvise_collapse()
2616 BUG_ON(vma->vm_start > start); in madvise_collapse()
2617 BUG_ON(vma->vm_end < end); in madvise_collapse()
2621 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) in madvise_collapse()
2622 return -EINVAL; in madvise_collapse()
2626 return -ENOMEM; in madvise_collapse()
2627 cc->is_khugepaged = false; in madvise_collapse()
2649 hend = vma->vm_end & HPAGE_PMD_MASK; in madvise_collapse()
2652 memset(cc->node_load, 0, sizeof(cc->node_load)); in madvise_collapse()
2653 nodes_clear(cc->alloc_nmask); in madvise_collapse()
2654 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in madvise_collapse()
2655 struct file *file = get_file(vma->vm_file); in madvise_collapse()
2713 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0 in madvise_collapse()
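
madvise_collapse() is the MADV_COLLAPSE entry point, and madvise_collapse_errno() above is what turns the internal scan results into the -EAGAIN/-EBUSY/-ENOMEM/-EINVAL a caller actually sees. A hedged userspace sketch that requests a synchronous collapse of an anonymous region and reacts to those values (MADV_COLLAPSE needs Linux 6.1 or newer; the interpretations in the strings below are approximate):

/* Synchronously collapse a 2 MiB anonymous region with MADV_COLLAPSE. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* uapi value; newer headers already define it */
#endif

#define HPAGE_SIZE (2UL << 20)

int main(void)
{
	char *raw = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *area;

	if (raw == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	area = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));
	memset(area, 0x5a, HPAGE_SIZE);	/* fault in all 512 base pages */

	if (!madvise(area, HPAGE_SIZE, MADV_COLLAPSE)) {
		puts("range is now PMD-mapped");
	} else if (errno == EAGAIN) {
		puts("transient failure; retrying later may succeed");
	} else if (errno == ENOMEM) {
		puts("huge page allocation failed or range not fully mapped");
	} else if (errno == EBUSY) {
		puts("range currently not collapsible");
	} else if (errno == EINVAL) {
		puts("range or kernel configuration not eligible for collapse");
	} else {
		perror("madvise(MADV_COLLAPSE)");
	}
	return 0;
}
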