Lines matching refs: page
486 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
492 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
495 if (!page_mapped(page)) in page_get_anon_vma()
511 if (!page_mapped(page)) { in page_get_anon_vma()
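
The lines above cover the lookup itself; the calling convention is easier to see from the other side. Below is a small illustrative sketch (not part of the listing) of how a caller pairs page_get_anon_vma() with put_anon_vma(), assuming the v5.14-era rmap API; inspect_anon_page() is a hypothetical helper name.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical caller: pin the anon_vma behind an anonymous page. */
static void inspect_anon_page(struct page *page)
{
	struct anon_vma *anon_vma;

	/* Takes a reference on the anon_vma; returns NULL if the page is
	 * not anonymous or is no longer mapped. */
	anon_vma = page_get_anon_vma(page);
	if (!anon_vma)
		return;

	/* ... use the anon_vma without holding its rwsem ... */

	put_anon_vma(anon_vma);		/* drop the reference taken above */
}
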
529 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
536 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
539 if (!page_mapped(page)) in page_lock_anon_vma_read()
550 if (!page_mapped(page)) { in page_lock_anon_vma_read()
563 if (!page_mapped(page)) { in page_lock_anon_vma_read()
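
page_lock_anon_vma_read() differs from page_get_anon_vma() in that it returns with the anon_vma rwsem held for read rather than with a reference. A hedged caller-side sketch, assuming the v5.14-era API; the helper name is made up.

static void walk_with_anon_vma_locked(struct page *page)
{
	struct anon_vma *anon_vma;

	anon_vma = page_lock_anon_vma_read(page);
	if (!anon_vma)
		return;			/* not anon, or unmapped in the meantime */

	/* anon_vma->rwsem is held for read here, so the VMA interval tree
	 * hanging off this anon_vma can be walked safely. */

	page_unlock_anon_vma_read(anon_vma);
}
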
709 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
711 if (PageAnon(page)) { in page_address_in_vma()
712 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
722 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { in page_address_in_vma()
726 return vma_address(page, vma); in page_address_in_vma()
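
page_address_in_vma() reports failure in-band: the returned address is -EFAULT when the page does not belong to the vma. A minimal sketch of the check a caller is expected to make (hypothetical helper name, v5.14-era API assumed).

static bool page_is_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long addr = page_address_in_vma(page, vma);

	/* The address doubles as the error code: -EFAULT means the page
	 * is not mapped by this vma. */
	return addr != -EFAULT;
}
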
772 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
777 .page = page, in page_referenced_one()
819 clear_page_idle(page); in page_referenced_one()
820 if (test_and_clear_page_young(page)) in page_referenced_one()
855 int page_referenced(struct page *page, in page_referenced() argument
862 .mapcount = total_mapcount(page), in page_referenced()
875 if (!page_rmapping(page)) in page_referenced()
878 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
879 we_locked = trylock_page(page); in page_referenced()
893 rmap_walk(page, &rwc); in page_referenced()
897 unlock_page(page); in page_referenced()
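
From the caller side, page_referenced() is a simple aging query: it returns the number of ptes whose accessed bit was found set and reports VM_LOCKED through *vm_flags. A sketch loosely modelled on reclaim's use of it, assuming the v5.14 signature; the helper name is hypothetical.

static bool page_recently_referenced(struct page *page, struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced;

	/* Second argument 0: we do not hold the page lock; as the lines
	 * above show, the function trylocks internally for non-anon pages. */
	referenced = page_referenced(page, 0, memcg, &vm_flags);

	if (vm_flags & VM_LOCKED)	/* mapped by an mlocked vma */
		return true;

	return referenced > 0;
}
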
902 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
906 .page = page, in page_mkclean_one()
920 vma_address_end(page, vma)); in page_mkclean_one()
948 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
984 int page_mkclean(struct page *page) in page_mkclean() argument
994 BUG_ON(!PageLocked(page)); in page_mkclean()
996 if (!page_mapped(page)) in page_mkclean()
999 mapping = page_mapping(page); in page_mkclean()
1003 rmap_walk(page, &rwc); in page_mkclean()
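
page_mkclean() is the write-protect pass used before starting writeback; it returns how many ptes it cleaned. A condensed sketch of the pattern used by clear_page_dirty_for_io(), with the surrounding accounting simplified (the real caller already holds the page lock).

static bool start_writeback_clean(struct page *page)
{
	bool dirty;

	lock_page(page);		/* page_mkclean() requires PageLocked */

	/* Write-protect every pte mapping the page; if any pte was
	 * writable, redirty the page so the data cannot be lost. */
	if (page_mkclean(page))
		set_page_dirty(page);

	dirty = TestClearPageDirty(page);
	unlock_page(page);

	return dirty;			/* writeback proceeds only if set */
}
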
1019 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1023 page = compound_head(page); in page_move_anon_rmap()
1025 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1034 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
1044 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1051 if (PageAnon(page)) in __page_set_anon_rmap()
1069 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in __page_set_anon_rmap()
1070 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1079 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1093 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); in __page_check_anon_rmap()
1094 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1095 page); in __page_check_anon_rmap()
1110 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1113 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1121 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1127 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1128 lock_page_memcg(page); in do_page_add_anon_rmap()
1130 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1134 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1135 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in do_page_add_anon_rmap()
1136 mapcount = compound_mapcount_ptr(page); in do_page_add_anon_rmap()
1139 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1143 int nr = compound ? thp_nr_pages(page) : 1; in do_page_add_anon_rmap()
1151 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); in do_page_add_anon_rmap()
1152 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1155 if (unlikely(PageKsm(page))) { in do_page_add_anon_rmap()
1156 unlock_page_memcg(page); in do_page_add_anon_rmap()
1162 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1165 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
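
do_page_add_anon_rmap() exists for callers that already know whether the page is exclusively owned, chiefly the swap-in path. A minimal sketch of that flavour of call, assuming the v5.14 flag names RMAP_EXCLUSIVE and RMAP_COMPOUND; the wrapper name is hypothetical.

static void swapin_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
				 unsigned long address, bool exclusive)
{
	/* RMAP_EXCLUSIVE: the caller knows this mm is the only user, so
	 * __page_set_anon_rmap() may point page->mapping straight at this
	 * vma's anon_vma.  RMAP_COMPOUND would be passed for a PMD mapping. */
	do_page_add_anon_rmap(page, vma, address,
			      exclusive ? RMAP_EXCLUSIVE : 0);
}
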
1179 void page_add_new_anon_rmap(struct page *page, in page_add_new_anon_rmap() argument
1182 int nr = compound ? thp_nr_pages(page) : 1; in page_add_new_anon_rmap()
1185 __SetPageSwapBacked(page); in page_add_new_anon_rmap()
1187 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in page_add_new_anon_rmap()
1189 atomic_set(compound_mapcount_ptr(page), 0); in page_add_new_anon_rmap()
1190 if (hpage_pincount_available(page)) in page_add_new_anon_rmap()
1191 atomic_set(compound_pincount_ptr(page), 0); in page_add_new_anon_rmap()
1193 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); in page_add_new_anon_rmap()
1196 VM_BUG_ON_PAGE(PageTransCompound(page), page); in page_add_new_anon_rmap()
1198 atomic_set(&page->_mapcount, 0); in page_add_new_anon_rmap()
1200 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in page_add_new_anon_rmap()
1201 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
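
page_add_new_anon_rmap() is for pages that cannot yet be mapped by anyone else, which is why it sets the counters directly rather than incrementing them. A condensed sketch of the ordering in do_anonymous_page() around this call; error handling, pte locking, and headers such as <linux/swap.h> are assumed, and the helper name is invented.

static void map_new_anon_page(struct vm_area_struct *vma, unsigned long address,
			      pte_t *pte, struct page *page)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address, false);	/* new, exclusive page */
	lru_cache_add_inactive_or_unevictable(page, vma);
	set_pte_at(vma->vm_mm, address, pte, entry);
}
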
1211 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1215 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1216 lock_page_memcg(page); in page_add_file_rmap()
1217 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1218 int nr_pages = thp_nr_pages(page); in page_add_file_rmap()
1221 if (atomic_inc_and_test(&page[i]._mapcount)) in page_add_file_rmap()
1224 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) in page_add_file_rmap()
1226 if (PageSwapBacked(page)) in page_add_file_rmap()
1227 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, in page_add_file_rmap()
1230 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, in page_add_file_rmap()
1233 if (PageTransCompound(page) && page_mapping(page)) { in page_add_file_rmap()
1234 struct page *head = compound_head(page); in page_add_file_rmap()
1236 VM_WARN_ON_ONCE(!PageLocked(page)); in page_add_file_rmap()
1239 if (PageMlocked(page)) in page_add_file_rmap()
1242 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1245 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1247 unlock_page_memcg(page); in page_add_file_rmap()
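
The file-side counterpart is called when a page-cache page is wired into a pte, for example from the fault path. A simplified sketch of that sequence, loosely modelled on do_set_pte() with the write/THP cases dropped; the helper name is hypothetical.

static void map_file_page(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *pte, struct page *page)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
	page_add_file_rmap(page, false);	/* false: pte-level, not pmd-level, mapping */
	set_pte_at(vma->vm_mm, addr, pte, entry);
}
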
1250 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1254 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1257 if (unlikely(PageHuge(page))) { in page_remove_file_rmap()
1259 atomic_dec(compound_mapcount_ptr(page)); in page_remove_file_rmap()
1264 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1265 int nr_pages = thp_nr_pages(page); in page_remove_file_rmap()
1268 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1271 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1273 if (PageSwapBacked(page)) in page_remove_file_rmap()
1274 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, in page_remove_file_rmap()
1277 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, in page_remove_file_rmap()
1280 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1289 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1291 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1292 clear_page_mlock(page); in page_remove_file_rmap()
1295 static void page_remove_anon_compound_rmap(struct page *page) in page_remove_anon_compound_rmap() argument
1299 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1303 if (unlikely(PageHuge(page))) in page_remove_anon_compound_rmap()
1309 __mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page)); in page_remove_anon_compound_rmap()
1311 if (TestClearPageDoubleMap(page)) { in page_remove_anon_compound_rmap()
1316 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_anon_compound_rmap()
1317 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1326 if (nr && nr < thp_nr_pages(page)) in page_remove_anon_compound_rmap()
1327 deferred_split_huge_page(page); in page_remove_anon_compound_rmap()
1329 nr = thp_nr_pages(page); in page_remove_anon_compound_rmap()
1332 if (unlikely(PageMlocked(page))) in page_remove_anon_compound_rmap()
1333 clear_page_mlock(page); in page_remove_anon_compound_rmap()
1336 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1346 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1348 lock_page_memcg(page); in page_remove_rmap()
1350 if (!PageAnon(page)) { in page_remove_rmap()
1351 page_remove_file_rmap(page, compound); in page_remove_rmap()
1356 page_remove_anon_compound_rmap(page); in page_remove_rmap()
1361 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1369 __dec_lruvec_page_state(page, NR_ANON_MAPPED); in page_remove_rmap()
1371 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1372 clear_page_mlock(page); in page_remove_rmap()
1374 if (PageTransCompound(page)) in page_remove_rmap()
1375 deferred_split_huge_page(compound_head(page)); in page_remove_rmap()
1387 unlock_page_memcg(page); in page_remove_rmap()
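
page_remove_rmap() is the accounting half of tearing down a mapping; the pte itself must already have been cleared by the caller. A condensed sketch of a zap-style sequence around it, assuming a present pte and ignoring TLB batching; the helper name is invented.

static void unmap_one_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *pte)
{
	struct page *page;
	pte_t ptent;

	ptent = ptep_clear_flush(vma, addr, pte);	/* clear pte, flush TLB */
	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (pte_dirty(ptent))
		set_page_dirty(page);

	dec_mm_counter(vma->vm_mm, mm_counter(page));
	page_remove_rmap(page, false);	/* drop the mapcount and stats */
	put_page(page);			/* drop the reference the pte held */
}
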
1393 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1398 .page = page, in try_to_unmap_one()
1403 struct page *subpage; in try_to_unmap_one()
1418 split_huge_pmd_address(vma, address, false, page); in try_to_unmap_one()
1428 range.end = PageKsm(page) ? in try_to_unmap_one()
1429 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_unmap_one()
1432 if (PageHuge(page)) { in try_to_unmap_one()
1454 if (!PageTransCompound(page) || (PageHead(page) && in try_to_unmap_one()
1455 !PageDoubleMap(page) && !PageAnon(page))) in try_to_unmap_one()
1456 mlock_vma_page(page); in try_to_unmap_one()
1463 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
1465 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1468 if (PageHuge(page) && !PageAnon(page)) { in try_to_unmap_one()
1522 set_page_dirty(page); in try_to_unmap_one()
1527 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1529 if (PageHuge(page)) { in try_to_unmap_one()
1530 hugetlb_count_sub(compound_nr(page), mm); in try_to_unmap_one()
1535 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1550 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1554 } else if (PageAnon(page)) { in try_to_unmap_one()
1561 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { in try_to_unmap_one()
1572 if (!PageSwapBacked(page)) { in try_to_unmap_one()
1573 if (!PageDirty(page)) { in try_to_unmap_one()
1586 SetPageSwapBacked(page); in try_to_unmap_one()
1632 dec_mm_counter(mm, mm_counter_file(page)); in try_to_unmap_one()
1642 page_remove_rmap(subpage, PageHuge(page)); in try_to_unmap_one()
1643 put_page(page); in try_to_unmap_one()
1656 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1658 return !page_mapped(page); in page_not_mapped()
1672 void try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1682 rmap_walk_locked(page, &rwc); in try_to_unmap()
1684 rmap_walk(page, &rwc); in try_to_unmap()
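
As the void return type in the definition above shows, try_to_unmap() no longer reports success in this version; callers such as shrink_page_list() re-check page_mapped() afterwards. A minimal sketch of that pattern; the wrapper name is hypothetical.

static bool reclaim_unmap_page(struct page *page, enum ttu_flags flags)
{
	if (!page_mapped(page))
		return true;

	/* The rmap walk may bail out early (e.g. on a contended pte lock),
	 * so success is judged by re-reading the mapcount. */
	try_to_unmap(page, flags);

	return !page_mapped(page);
}
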
1693 static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, in try_to_migrate_one() argument
1698 .page = page, in try_to_migrate_one()
1703 struct page *subpage; in try_to_migrate_one()
1722 split_huge_pmd_address(vma, address, true, page); in try_to_migrate_one()
1732 range.end = PageKsm(page) ? in try_to_migrate_one()
1733 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_migrate_one()
1736 if (PageHuge(page)) { in try_to_migrate_one()
1750 VM_BUG_ON_PAGE(PageHuge(page) || in try_to_migrate_one()
1751 !PageTransCompound(page), page); in try_to_migrate_one()
1753 set_pmd_migration_entry(&pvmw, page); in try_to_migrate_one()
1759 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_migrate_one()
1761 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_migrate_one()
1764 if (PageHuge(page) && !PageAnon(page)) { in try_to_migrate_one()
1804 set_page_dirty(page); in try_to_migrate_one()
1809 if (is_zone_device_page(page)) { in try_to_migrate_one()
1819 page_to_pfn(page)); in try_to_migrate_one()
1842 subpage = page; in try_to_migrate_one()
1843 } else if (PageHWPoison(page)) { in try_to_migrate_one()
1845 if (PageHuge(page)) { in try_to_migrate_one()
1846 hugetlb_count_sub(compound_nr(page), mm); in try_to_migrate_one()
1851 dec_mm_counter(mm, mm_counter(page)); in try_to_migrate_one()
1866 dec_mm_counter(mm, mm_counter(page)); in try_to_migrate_one()
1912 page_remove_rmap(subpage, PageHuge(page)); in try_to_migrate_one()
1913 put_page(page); in try_to_migrate_one()
1929 void try_to_migrate(struct page *page, enum ttu_flags flags) in try_to_migrate() argument
1946 if (is_zone_device_page(page) && !is_device_private_page(page)) in try_to_migrate()
1957 if (!PageKsm(page) && PageAnon(page)) in try_to_migrate()
1961 rmap_walk_locked(page, &rwc); in try_to_migrate()
1963 rmap_walk(page, &rwc); in try_to_migrate()
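
try_to_migrate() replaces every mapping of the page with a migration entry rather than dropping it, so migration callers pair it with remove_migration_ptes() once the data has moved. A condensed sketch of that pairing, loosely following the unmap-and-move pattern; flags, error paths, and the actual copy are omitted, and the helper name is invented.

static bool migrate_mappings(struct page *page, struct page *newpage)
{
	bool was_mapped = false;

	if (page_mapped(page)) {
		try_to_migrate(page, 0);	/* install migration entries */
		was_mapped = true;
	}

	if (page_mapped(page))
		return false;		/* some mapping could not be converted */

	/* ... copy the contents and switch the mapping over to newpage ... */

	if (was_mapped)
		remove_migration_ptes(page, newpage, false);

	return true;
}
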
1970 static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, in page_mlock_one() argument
1974 .page = page, in page_mlock_one()
1996 mlock_vma_page(page); in page_mlock_one()
2017 void page_mlock(struct page *page) in page_mlock() argument
2026 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in page_mlock()
2027 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in page_mlock()
2030 if (PageTransCompound(page) && PageAnon(page)) in page_mlock()
2033 rmap_walk(page, &rwc); in page_mlock()
2044 static bool page_make_device_exclusive_one(struct page *page, in page_make_device_exclusive_one() argument
2049 .page = page, in page_make_device_exclusive_one()
2055 struct page *subpage; in page_make_device_exclusive_one()
2063 address + page_size(page)), args->owner); in page_make_device_exclusive_one()
2068 VM_BUG_ON_PAGE(!pvmw.pte, page); in page_make_device_exclusive_one()
2076 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in page_make_device_exclusive_one()
2085 set_page_dirty(page); in page_make_device_exclusive_one()
2140 static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm, in page_make_device_exclusive() argument
2161 if (!PageAnon(page) || PageTail(page)) in page_make_device_exclusive()
2164 rmap_walk(page, &rwc); in page_make_device_exclusive()
2166 return args.valid && !page_mapcount(page); in page_make_device_exclusive()
2191 unsigned long end, struct page **pages, in make_device_exclusive_range()
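
make_device_exclusive_range() is the exported entry point built on page_make_device_exclusive(); drivers call it with the mmap lock held for read and get back locked, referenced pages, with slots that could not be converted left NULL. A driver-side sketch loosely modelled on how an atomic-fault path might use it; the helper name and single-page range are illustrative.

static struct page *grab_exclusive_page(struct mm_struct *mm,
					unsigned long addr, void *owner)
{
	struct page *page = NULL;
	int ret;

	mmap_read_lock(mm);
	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					  &page, owner);
	mmap_read_unlock(mm);

	if (ret < 0)
		return NULL;

	/* NULL if the page could not be made device-exclusive; otherwise the
	 * caller must unlock_page() and put_page() when finished with it. */
	return page;
}
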
2231 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
2237 return rwc->anon_lock(page); in rmap_walk_anon_lock()
2245 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
2267 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_anon() argument
2275 anon_vma = page_anon_vma(page); in rmap_walk_anon()
2277 VM_BUG_ON_PAGE(!anon_vma, page); in rmap_walk_anon()
2279 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
2284 pgoff_start = page_to_pgoff(page); in rmap_walk_anon()
2285 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_anon()
2289 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
2297 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
2299 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
2320 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_file() argument
2323 struct address_space *mapping = page_mapping(page); in rmap_walk_file()
2333 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
2338 pgoff_start = page_to_pgoff(page); in rmap_walk_file()
2339 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_file()
2344 unsigned long address = vma_address(page, vma); in rmap_walk_file()
2352 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
2354 if (rwc->done && rwc->done(page)) in rmap_walk_file()
2363 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
2365 if (unlikely(PageKsm(page))) in rmap_walk()
2366 rmap_walk_ksm(page, rwc); in rmap_walk()
2367 else if (PageAnon(page)) in rmap_walk()
2368 rmap_walk_anon(page, rwc, false); in rmap_walk()
2370 rmap_walk_file(page, rwc, false); in rmap_walk()
2374 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2377 VM_BUG_ON_PAGE(PageKsm(page), page); in rmap_walk_locked()
2378 if (PageAnon(page)) in rmap_walk_locked()
2379 rmap_walk_anon(page, rwc, true); in rmap_walk_locked()
2381 rmap_walk_file(page, rwc, true); in rmap_walk_locked()
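
Both entry points dispatch through the caller-supplied rmap_walk_control, so a new rmap client only has to provide the callbacks. A minimal hypothetical client that counts the VMAs currently mapping a page, using the v5.14 field names (.rmap_one and .arg; .done, .anon_lock and .invalid_vma are optional).

static bool count_one_mapping(struct page *page, struct vm_area_struct *vma,
			      unsigned long addr, void *arg)
{
	(*(int *)arg)++;
	return true;			/* keep walking */
}

static int count_vma_mappings(struct page *page)
{
	int nr = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one_mapping,
		.arg = &nr,
	};

	rmap_walk(page, &rwc);		/* page must be locked for the file case */
	return nr;
}
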
2390 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
2396 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
2399 first = atomic_inc_and_test(compound_mapcount_ptr(page)); in hugepage_add_anon_rmap()
2401 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
2404 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
2408 atomic_set(compound_mapcount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2409 if (hpage_pincount_available(page)) in hugepage_add_new_anon_rmap()
2410 atomic_set(compound_pincount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2412 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()