Lines matching refs:folio in mm/rmap.c
492 struct anon_vma *folio_get_anon_vma(struct folio *folio) in folio_get_anon_vma() argument
498 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_get_anon_vma()
501 if (!folio_mapped(folio)) in folio_get_anon_vma()
517 if (!folio_mapped(folio)) { in folio_get_anon_vma()
536 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, in folio_lock_anon_vma_read() argument
544 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_lock_anon_vma_read()
547 if (!folio_mapped(folio)) in folio_lock_anon_vma_read()
558 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
577 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
749 struct folio *folio = page_folio(page); in page_address_in_vma() local
750 if (folio_test_anon(folio)) { in page_address_in_vma()
751 struct anon_vma *page__anon_vma = folio_anon_vma(folio); in page_address_in_vma()
761 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
806 static bool folio_referenced_one(struct folio *folio, in folio_referenced_one() argument
810 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in folio_referenced_one()
817 (!folio_test_large(folio) || !pvmw.pte)) { in folio_referenced_one()
819 mlock_vma_folio(folio, vma, !pvmw.pte); in folio_referenced_one()
858 folio_clear_idle(folio); in folio_referenced_one()
859 if (folio_test_clear_young(folio)) in folio_referenced_one()
896 int folio_referenced(struct folio *folio, int is_locked, in folio_referenced() argument
901 .mapcount = folio_mapcount(folio), in folio_referenced()
915 if (!folio_raw_mapping(folio)) in folio_referenced()
918 if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { in folio_referenced()
919 we_locked = folio_trylock(folio); in folio_referenced()
933 rmap_walk(folio, &rwc); in folio_referenced()
937 folio_unlock(folio); in folio_referenced()
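The folio_referenced() references above (lines 896-937) show the general pattern for driving the reverse map: fill in a struct rmap_walk_control with a per-VMA callback and hand it to rmap_walk(). A minimal sketch of that pattern follows, assuming the rmap_walk_control layout from the same kernel era as this listing; the callback, struct and counter names are illustrative, not taken from mm/rmap.c.

#include <linux/mm.h>
#include <linux/rmap.h>

struct vma_count_arg {
        int vmas_seen;                  /* illustrative accumulator */
};

/* Called once for each VMA that still maps the folio. */
static bool count_one_vma(struct folio *folio, struct vm_area_struct *vma,
                          unsigned long address, void *arg)
{
        struct vma_count_arg *vca = arg;

        vca->vmas_seen++;
        return true;                    /* keep walking; false stops the walk */
}

static int count_mapping_vmas(struct folio *folio)
{
        struct vma_count_arg vca = { 0 };
        struct rmap_walk_control rwc = {
                .rmap_one = count_one_vma,
                .arg = &vca,
                .anon_lock = folio_lock_anon_vma_read,  /* as folio_referenced() sets it */
        };

        /* rmap_walk_file() insists on a locked folio (line 2482 below). */
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        rmap_walk(folio, &rwc);

        return vca.vmas_seen;
}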
1012 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, in page_mkclean_one() argument
1015 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); in page_mkclean_one()
1031 int folio_mkclean(struct folio *folio) in folio_mkclean() argument
1041 BUG_ON(!folio_test_locked(folio)); in folio_mkclean()
1043 if (!folio_mapped(folio)) in folio_mkclean()
1046 mapping = folio_mapping(folio); in folio_mkclean()
1050 rmap_walk(folio, &rwc); in folio_mkclean()
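folio_mkclean() (lines 1031-1050 above) is the simplest consumer of this walk: it requires a locked folio, returns early if the folio is unmapped or has no mapping, and reports how many PTEs page_mkclean_one() write-protected and cleaned. A hedged caller-side sketch, with an illustrative function name:

#include <linux/pagemap.h>
#include <linux/rmap.h>

/* Write-protect and clean every PTE mapping a file-backed folio. */
static int wrprotect_file_folio(struct folio *folio)
{
        int cleaned;

        folio_lock(folio);              /* folio_mkclean() BUGs on an unlocked folio */
        cleaned = folio_mkclean(folio); /* walks the file rmap via rmap_walk() */
        folio_unlock(folio);

        return cleaned;                 /* number of PTEs cleaned */
}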
1101 struct folio *folio = page_folio(page); in page_move_anon_rmap() local
1103 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in page_move_anon_rmap()
1112 WRITE_ONCE(folio->mapping, anon_vma); in page_move_anon_rmap()
1164 struct folio *folio = page_folio(page); in __page_check_anon_rmap() local
1176 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1177 folio); in __page_check_anon_rmap()
1470 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, in try_to_unmap_one() argument
1474 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_unmap_one()
1491 split_huge_pmd_address(vma, address, false, folio); in try_to_unmap_one()
1504 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1516 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_unmap_one()
1524 mlock_vma_folio(folio, vma, false); in try_to_unmap_one()
1530 subpage = folio_page(folio, in try_to_unmap_one()
1531 pte_pfn(*pvmw.pte) - folio_pfn(folio)); in try_to_unmap_one()
1533 anon_exclusive = folio_test_anon(folio) && in try_to_unmap_one()
1536 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1537 bool anon = folio_test_anon(folio); in try_to_unmap_one()
1621 folio_mark_dirty(folio); in try_to_unmap_one()
1628 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1629 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_unmap_one()
1632 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1647 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1651 } else if (folio_test_anon(folio)) { in try_to_unmap_one()
1658 if (unlikely(folio_test_swapbacked(folio) != in try_to_unmap_one()
1659 folio_test_swapcache(folio))) { in try_to_unmap_one()
1670 if (!folio_test_swapbacked(folio)) { in try_to_unmap_one()
1680 ref_count = folio_ref_count(folio); in try_to_unmap_one()
1681 map_count = folio_mapcount(folio); in try_to_unmap_one()
1694 !folio_test_dirty(folio)) { in try_to_unmap_one()
1707 folio_set_swapbacked(folio); in try_to_unmap_one()
1778 dec_mm_counter(mm, mm_counter_file(&folio->page)); in try_to_unmap_one()
1788 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); in try_to_unmap_one()
1791 folio_put(folio); in try_to_unmap_one()
1804 static int page_not_mapped(struct folio *folio) in page_not_mapped() argument
1806 return !folio_mapped(folio); in page_not_mapped()
1820 void try_to_unmap(struct folio *folio, enum ttu_flags flags) in try_to_unmap() argument
1830 rmap_walk_locked(folio, &rwc); in try_to_unmap()
1832 rmap_walk(folio, &rwc); in try_to_unmap()
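try_to_unmap() (lines 1820-1832 above) returns void, so reclaim-style callers hold the folio lock and re-check folio_mapped() afterwards, the same test that page_not_mapped() wraps for the walk's done callback. A hedged sketch of that calling convention (the flag choice mirrors the reclaim path, but the function itself is illustrative):

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/rmap.h>

static bool unmap_for_reclaim(struct folio *folio)
{
        enum ttu_flags flags = TTU_BATCH_FLUSH; /* batch TLB flushes across folios */

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        if (folio_test_pmd_mappable(folio))
                flags |= TTU_SPLIT_HUGE_PMD;    /* try_to_unmap_one() splits the PMD first (line 1491) */

        try_to_unmap(folio, flags);
        return !folio_mapped(folio);            /* true if every mapping was torn down */
}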
1841 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, in try_to_migrate_one() argument
1845 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_migrate_one()
1866 split_huge_pmd_address(vma, address, true, folio); in try_to_migrate_one()
1879 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
1893 subpage = folio_page(folio, in try_to_migrate_one()
1894 pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); in try_to_migrate_one()
1895 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in try_to_migrate_one()
1896 !folio_test_pmd_mappable(folio), folio); in try_to_migrate_one()
1908 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_migrate_one()
1910 if (folio_is_zone_device(folio)) { in try_to_migrate_one()
1921 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); in try_to_migrate_one()
1922 subpage = &folio->page; in try_to_migrate_one()
1924 subpage = folio_page(folio, in try_to_migrate_one()
1925 pte_pfn(*pvmw.pte) - folio_pfn(folio)); in try_to_migrate_one()
1928 anon_exclusive = folio_test_anon(folio) && in try_to_migrate_one()
1931 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
1932 bool anon = folio_test_anon(folio); in try_to_migrate_one()
1992 folio_mark_dirty(folio); in try_to_migrate_one()
1997 if (folio_is_device_private(folio)) { in try_to_migrate_one()
1998 unsigned long pfn = folio_pfn(folio); in try_to_migrate_one()
2029 compound_order(&folio->page)); in try_to_migrate_one()
2036 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2037 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_migrate_one()
2040 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2055 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2064 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2072 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && in try_to_migrate_one()
2078 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2110 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2115 compound_order(&folio->page)); in try_to_migrate_one()
2129 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); in try_to_migrate_one()
2132 folio_put(folio); in try_to_migrate_one()
2148 void try_to_migrate(struct folio *folio, enum ttu_flags flags) in try_to_migrate() argument
2165 if (folio_is_zone_device(folio) && in try_to_migrate()
2166 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) in try_to_migrate()
2177 if (!folio_test_ksm(folio) && folio_test_anon(folio)) in try_to_migrate()
2181 rmap_walk_locked(folio, &rwc); in try_to_migrate()
2183 rmap_walk(folio, &rwc); in try_to_migrate()
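try_to_migrate() (lines 2148-2183 above) follows the same shape but installs migration entries instead of dropping the mappings outright; the migration caller later restores them with the folio-based remove_migration_ptes(src, dst, false) helper. A hedged sketch of the freeze step only (illustrative function name, not the real migrate_pages() code):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/rmap.h>

static int freeze_mappings_for_migration(struct folio *src)
{
        VM_BUG_ON_FOLIO(!folio_test_locked(src), src);

        if (!folio_mapped(src))
                return 0;               /* nothing maps it, nothing to freeze */

        try_to_migrate(src, 0);         /* replace PTEs with migration entries */

        /* try_to_migrate() is void; re-check, and let the caller retry if needed */
        return folio_mapped(src) ? -EAGAIN : 0;
}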
2194 static bool page_make_device_exclusive_one(struct folio *folio, in page_make_device_exclusive_one() argument
2198 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in page_make_device_exclusive_one()
2209 address + folio_size(folio)), in page_make_device_exclusive_one()
2215 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in page_make_device_exclusive_one()
2223 subpage = folio_page(folio, in page_make_device_exclusive_one()
2224 pte_pfn(*pvmw.pte) - folio_pfn(folio)); in page_make_device_exclusive_one()
2233 folio_mark_dirty(folio); in page_make_device_exclusive_one()
2289 static bool folio_make_device_exclusive(struct folio *folio, in folio_make_device_exclusive() argument
2309 if (!folio_test_anon(folio)) in folio_make_device_exclusive()
2312 rmap_walk(folio, &rwc); in folio_make_device_exclusive()
2314 return args.valid && !folio_mapcount(folio); in folio_make_device_exclusive()
2352 struct folio *folio = page_folio(pages[i]); in make_device_exclusive_range() local
2353 if (PageTail(pages[i]) || !folio_trylock(folio)) { in make_device_exclusive_range()
2354 folio_put(folio); in make_device_exclusive_range()
2359 if (!folio_make_device_exclusive(folio, mm, start, owner)) { in make_device_exclusive_range()
2360 folio_unlock(folio); in make_device_exclusive_range()
2361 folio_put(folio); in make_device_exclusive_range()
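make_device_exclusive_range() (lines 2352-2361 above) is the driver-facing entry point: it pins the range with get_user_pages_remote(), try-locks each folio, and leaves successful entries locked and referenced while clearing out the ones it could not make exclusive. A hedged single-page caller sketch, loosely modelled on how a device driver might use it; the owner cookie handling and helper name are illustrative:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/rmap.h>

static int grab_exclusive_page(struct mm_struct *mm, unsigned long addr,
                               void *owner, struct page **page)
{
        int npages;

        mmap_read_lock(mm);     /* the helper does get_user_pages_remote() internally */
        npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
                                             page, owner);
        mmap_read_unlock(mm);

        if (npages < 1)
                return npages < 0 ? npages : -EBUSY;    /* GUP failed or found nothing */
        if (!*page)
                return -EBUSY;          /* found, but could not be made exclusive */

        /*
         * On success the page is device-exclusive, locked and referenced;
         * the caller must unlock and put it when done.
         */
        return 0;
}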
2380 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, in rmap_walk_anon_lock() argument
2386 return rwc->anon_lock(folio, rwc); in rmap_walk_anon_lock()
2394 anon_vma = folio_anon_vma(folio); in rmap_walk_anon_lock()
2421 static void rmap_walk_anon(struct folio *folio, in rmap_walk_anon() argument
2429 anon_vma = folio_anon_vma(folio); in rmap_walk_anon()
2431 VM_BUG_ON_FOLIO(!anon_vma, folio); in rmap_walk_anon()
2433 anon_vma = rmap_walk_anon_lock(folio, rwc); in rmap_walk_anon()
2438 pgoff_start = folio_pgoff(folio); in rmap_walk_anon()
2439 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_anon()
2443 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_anon()
2451 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2453 if (rwc->done && rwc->done(folio)) in rmap_walk_anon()
2469 static void rmap_walk_file(struct folio *folio, in rmap_walk_file() argument
2472 struct address_space *mapping = folio_mapping(folio); in rmap_walk_file()
2482 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in rmap_walk_file()
2487 pgoff_start = folio_pgoff(folio); in rmap_walk_file()
2488 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_file()
2503 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_file()
2511 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_file()
2513 if (rwc->done && rwc->done(folio)) in rmap_walk_file()
2522 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk() argument
2524 if (unlikely(folio_test_ksm(folio))) in rmap_walk()
2525 rmap_walk_ksm(folio, rwc); in rmap_walk()
2526 else if (folio_test_anon(folio)) in rmap_walk()
2527 rmap_walk_anon(folio, rwc, false); in rmap_walk()
2529 rmap_walk_file(folio, rwc, false); in rmap_walk()
2533 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2536 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); in rmap_walk_locked()
2537 if (folio_test_anon(folio)) in rmap_walk_locked()
2538 rmap_walk_anon(folio, rwc, true); in rmap_walk_locked()
2540 rmap_walk_file(folio, rwc, true); in rmap_walk_locked()