Lines matching refs: page

Cross-reference of the symbol page in the Linux kernel's mm/rmap.c (roughly the v5.4-v5.7 era, judging by helpers such as hpage_nr_pages()). Each entry gives the source line number, the matching line, and the enclosing function; entries tagged "argument" are declarations where page is a function parameter.

465 struct anon_vma *page_get_anon_vma(struct page *page)  in page_get_anon_vma()  argument
471 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
474 if (!page_mapped(page)) in page_get_anon_vma()
490 if (!page_mapped(page)) { in page_get_anon_vma()
508 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
515 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
518 if (!page_mapped(page)) in page_lock_anon_vma_read()
529 if (!page_mapped(page)) { in page_lock_anon_vma_read()
542 if (!page_mapped(page)) { in page_lock_anon_vma_read()
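
The repeated page_mapped() checks at 474/490 and 518/529/542 implement a lockless anon_vma lookup: page->mapping is sampled once with READ_ONCE() under RCU, and page_mapped() is re-checked after taking a reference, because only a still-mapped page is guaranteed to pin its anon_vma. A condensed sketch of page_get_anon_vma() as these lines suggest it reads (a reconstruction; page_lock_anon_vma_read()'s rwsem-trylock fast path is omitted):

struct anon_vma *page_get_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long)READ_ONCE(page->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!page_mapped(page))         /* unmapped page cannot pin its anon_vma */
                goto out;

        anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;
                goto out;
        }

        /* recheck: the page may have been unmapped while we took the ref */
        if (!page_mapped(page)) {
                rcu_read_unlock();
                put_anon_vma(anon_vma);
                return NULL;
        }
out:
        rcu_read_unlock();
        return anon_vma;
}
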
688 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
691 if (PageAnon(page)) { in page_address_in_vma()
692 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
700 } else if (page->mapping) { in page_address_in_vma()
701 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
705 address = __vma_address(page, vma); in page_address_in_vma()
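
page_address_in_vma() first proves the page actually belongs to this VMA (anonymous pages must share the VMA's anon_vma root; file pages must match vma->vm_file->f_mapping) and only then converts the page's index to a user virtual address. A sketch of the whole function, reconstructed around the listed lines (the final bounds check is from the same-era source and may differ slightly by version):

unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        unsigned long address;

        if (PageAnon(page)) {
                struct anon_vma *page__anon_vma = page_anon_vma(page);

                if (!vma->anon_vma || !page__anon_vma ||
                    vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
        } else if (page->mapping) {
                if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;

        address = __vma_address(page, vma);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end))
                return -EFAULT;
        return address;
}
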
754 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
759 .page = page, in page_referenced_one()
801 clear_page_idle(page); in page_referenced_one()
802 if (test_and_clear_page_young(page)) in page_referenced_one()
837 int page_referenced(struct page *page, in page_referenced() argument
844 .mapcount = total_mapcount(page), in page_referenced()
857 if (!page_rmapping(page)) in page_referenced()
860 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
861 we_locked = trylock_page(page); in page_referenced()
875 rmap_walk(page, &rwc); in page_referenced()
879 unlock_page(page); in page_referenced()
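
page_referenced() shows the standard rmap_walk_control pattern used throughout this file: per-walk state travels in .arg, the per-VMA callback is .rmap_one, and .anon_lock overrides how the anon_vma lock is taken. Lines 860-861 also show why the function tolerates an unlocked page: non-anon (or KSM) pages are trylocked first, and the page is simply reported referenced if that fails. A condensed sketch (signature and page_referenced_arg fields are from the same-era mm/rmap.c, quoted from memory; the memcg-filtering branch is omitted):

int page_referenced(struct page *page, int is_locked,
                    struct mem_cgroup *memcg, unsigned long *vm_flags)
{
        int we_locked = 0;
        struct page_referenced_arg pra = {
                .mapcount = total_mapcount(page),
                .memcg = memcg,
        };
        struct rmap_walk_control rwc = {
                .rmap_one = page_referenced_one,
                .arg = (void *)&pra,
                .anon_lock = page_lock_anon_vma_read,
        };

        *vm_flags = 0;
        if (!pra.mapcount || !page_rmapping(page))
                return 0;

        if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
                we_locked = trylock_page(page);
                if (!we_locked)
                        return 1;       /* assume referenced rather than block */
        }

        rmap_walk(page, &rwc);
        *vm_flags = pra.vm_flags;

        if (we_locked)
                unlock_page(page);
        return pra.referenced;
}
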
884 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
888 .page = page, in page_mkclean_one()
902 min(vma->vm_end, address + page_size(page))); in page_mkclean_one()
930 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
966 int page_mkclean(struct page *page) in page_mkclean() argument
976 BUG_ON(!PageLocked(page)); in page_mkclean()
978 if (!page_mapped(page)) in page_mkclean()
981 mapping = page_mapping(page); in page_mkclean()
985 rmap_walk(page, &rwc); in page_mkclean()
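
page_mkclean() walks all mappings of a locked file page and write-protects each writable PTE (page_mkclean_one() at 884-930 does the per-VMA flush and wrprotect under an mmu-notifier range). A sketch of the wrapper around the listed lines:

int page_mkclean(struct page *page)
{
        int cleaned = 0;
        struct address_space *mapping;
        struct rmap_walk_control rwc = {
                .arg = (void *)&cleaned,
                .rmap_one = page_mkclean_one,
                .invalid_vma = invalid_mkclean_vma,
        };

        BUG_ON(!PageLocked(page));

        if (!page_mapped(page))
                return 0;

        mapping = page_mapping(page);
        if (!mapping)
                return 0;

        rmap_walk(page, &rwc);
        return cleaned;
}

Callers such as clear_page_dirty_for_io() in mm/page-writeback.c use the return value to transfer dirtiness back to the struct page: if (page_mkclean(page)) set_page_dirty(page);
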
1001 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1005 page = compound_head(page); in page_move_anon_rmap()
1007 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1016 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
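
Line 1016 is deliberate: the anon_vma pointer and the PAGE_MAPPING_ANON tag bit are stored into page->mapping with a single WRITE_ONCE(), so lockless readers such as page_get_anon_vma() (which READ_ONCE() the same field above) never observe one without the other. Sketch of the full function:

void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        page = compound_head(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_VMA(!anon_vma, vma);

        anon_vma = (void *)anon_vma + PAGE_MAPPING_ANON;
        /* one store publishes both the pointer and the ANON tag */
        WRITE_ONCE(page->mapping, (struct address_space *)anon_vma);
}
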
1026 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1033 if (PageAnon(page)) in __page_set_anon_rmap()
1045 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1046 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1055 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1071 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1072 BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address)); in __page_check_anon_rmap()
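
__page_set_anon_rmap() encodes the anon_vma into page->mapping and records the page's virtual offset in page->index; __page_check_anon_rmap() asserts the inverse relation (1071-1072) when a further mapping is added. A sketch of the setter, reconstructed around lines 1033-1046 (the exclusive/root handling is from the same-era source):

static void __page_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(!anon_vma);

        if (PageAnon(page))
                return;         /* already set up by a racing fault */

        /*
         * A page not exclusively mapped into this vma must point
         * at the oldest anon_vma in the hierarchy: the root.
         */
        if (!exclusive)
                anon_vma = anon_vma->root;

        anon_vma = (void *)anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *)anon_vma;
        page->index = linear_page_index(vma, address);
}
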
1088 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1091 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1099 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1107 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1108 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in do_page_add_anon_rmap()
1109 mapcount = compound_mapcount_ptr(page); in do_page_add_anon_rmap()
1112 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1116 int nr = compound ? hpage_nr_pages(page) : 1; in do_page_add_anon_rmap()
1124 __inc_node_page_state(page, NR_ANON_THPS); in do_page_add_anon_rmap()
1125 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1127 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1130 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1134 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1137 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
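
do_page_add_anon_rmap() bumps either the compound mapcount (a PMD mapping of a THP) or the per-4K _mapcount, and only the 0->1 transition (atomic_inc_and_test(), since mapcounts start at -1) updates the NR_ANON_* counters and installs the anon rmap. Condensed sketch around the listed lines:

void do_page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int flags)
{
        bool compound = flags & RMAP_COMPOUND;
        bool first;

        if (compound) {
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                first = atomic_inc_and_test(compound_mapcount_ptr(page));
        } else {
                first = atomic_inc_and_test(&page->_mapcount);
        }

        if (first) {
                int nr = compound ? hpage_nr_pages(page) : 1;

                if (compound)
                        __inc_node_page_state(page, NR_ANON_THPS);
                __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
        }
        if (unlikely(PageKsm(page)))
                return;         /* KSM pages keep their stable-tree mapping */

        VM_BUG_ON_PAGE(!PageLocked(page), page);

        if (first)
                __page_set_anon_rmap(page, vma, address,
                                     flags & RMAP_EXCLUSIVE);
        else
                __page_check_anon_rmap(page, vma, address);
}
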
1151 void page_add_new_anon_rmap(struct page *page, in page_add_new_anon_rmap() argument
1154 int nr = compound ? hpage_nr_pages(page) : 1; in page_add_new_anon_rmap()
1157 __SetPageSwapBacked(page); in page_add_new_anon_rmap()
1159 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in page_add_new_anon_rmap()
1161 atomic_set(compound_mapcount_ptr(page), 0); in page_add_new_anon_rmap()
1162 __inc_node_page_state(page, NR_ANON_THPS); in page_add_new_anon_rmap()
1165 VM_BUG_ON_PAGE(PageTransCompound(page), page); in page_add_new_anon_rmap()
1167 atomic_set(&page->_mapcount, 0); in page_add_new_anon_rmap()
1169 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr); in page_add_new_anon_rmap()
1170 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
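
page_add_new_anon_rmap() is the fast path for a page that cannot yet be mapped anywhere else: no atomic_inc_and_test(), the relevant mapcount is atomic_set() straight from -1 to 0, and __page_set_anon_rmap() is called with exclusive=1. Sketch:

void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, bool compound)
{
        int nr = compound ? hpage_nr_pages(page) : 1;

        __SetPageSwapBacked(page);
        if (compound) {
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                /* mapcounts start at -1; 0 means mapped once */
                atomic_set(compound_mapcount_ptr(page), 0);
                __inc_node_page_state(page, NR_ANON_THPS);
        } else {
                /* an anon THP is always mapped with a PMD first */
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
                atomic_set(&page->_mapcount, 0);
        }
        __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
        __page_set_anon_rmap(page, vma, address, 1);
}
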
1180 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1184 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1185 lock_page_memcg(page); in page_add_file_rmap()
1186 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1188 if (atomic_inc_and_test(&page[i]._mapcount)) in page_add_file_rmap()
1191 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) in page_add_file_rmap()
1193 if (PageSwapBacked(page)) in page_add_file_rmap()
1194 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_add_file_rmap()
1196 __inc_node_page_state(page, NR_FILE_PMDMAPPED); in page_add_file_rmap()
1198 if (PageTransCompound(page) && page_mapping(page)) { in page_add_file_rmap()
1199 VM_WARN_ON_ONCE(!PageLocked(page)); in page_add_file_rmap()
1201 SetPageDoubleMap(compound_head(page)); in page_add_file_rmap()
1202 if (PageMlocked(page)) in page_add_file_rmap()
1203 clear_page_mlock(compound_head(page)); in page_add_file_rmap()
1205 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1208 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1210 unlock_page_memcg(page); in page_add_file_rmap()
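
For a PMD-mapped file THP, page_add_file_rmap() raises every per-subpage _mapcount as well as the compound mapcount, accounting NR_SHMEM_PMDMAPPED or NR_FILE_PMDMAPPED depending on PageSwapBacked(); a PTE mapping of part of a compound file page instead sets PageDoubleMap. Condensed sketch (the HPAGE_PMD_NR loop bound and the out label are from the same-era source):

void page_add_file_rmap(struct page *page, bool compound)
{
        int i, nr = 1;

        VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
        lock_page_memcg(page);
        if (compound && PageTransHuge(page)) {
                for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
                        if (atomic_inc_and_test(&page[i]._mapcount))
                                nr++;
                }
                if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
                        goto out;
                if (PageSwapBacked(page))
                        __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
                else
                        __inc_node_page_state(page, NR_FILE_PMDMAPPED);
        } else {
                if (PageTransCompound(page) && page_mapping(page)) {
                        VM_WARN_ON_ONCE(!PageLocked(page));
                        SetPageDoubleMap(compound_head(page));
                        if (PageMlocked(page))
                                clear_page_mlock(compound_head(page));
                }
                if (!atomic_inc_and_test(&page->_mapcount))
                        goto out;
        }
        __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
        unlock_page_memcg(page);
}
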
1213 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1217 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1218 lock_page_memcg(page); in page_remove_file_rmap()
1221 if (unlikely(PageHuge(page))) { in page_remove_file_rmap()
1223 atomic_dec(compound_mapcount_ptr(page)); in page_remove_file_rmap()
1228 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1230 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1233 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1235 if (PageSwapBacked(page)) in page_remove_file_rmap()
1236 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_remove_file_rmap()
1238 __dec_node_page_state(page, NR_FILE_PMDMAPPED); in page_remove_file_rmap()
1240 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1249 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1251 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1252 clear_page_mlock(page); in page_remove_file_rmap()
1254 unlock_page_memcg(page); in page_remove_file_rmap()
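
page_remove_file_rmap() mirrors the add path: atomic_add_negative(-1, ...) detects the last unmap (the mapcount falling back to -1), hugetlb pages take only the compound-mapcount decrement at 1223, and the NR_* counters are decremented symmetrically. The last-unmap idiom, for reference:

        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
                goto out;
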
1257 static void page_remove_anon_compound_rmap(struct page *page) in page_remove_anon_compound_rmap() argument
1261 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1265 if (unlikely(PageHuge(page))) in page_remove_anon_compound_rmap()
1271 __dec_node_page_state(page, NR_ANON_THPS); in page_remove_anon_compound_rmap()
1273 if (TestClearPageDoubleMap(page)) { in page_remove_anon_compound_rmap()
1279 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1286 if (unlikely(PageMlocked(page))) in page_remove_anon_compound_rmap()
1287 clear_page_mlock(page); in page_remove_anon_compound_rmap()
1290 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1291 deferred_split_huge_page(page); in page_remove_anon_compound_rmap()
1302 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1304 if (!PageAnon(page)) in page_remove_rmap()
1305 return page_remove_file_rmap(page, compound); in page_remove_rmap()
1308 return page_remove_anon_compound_rmap(page); in page_remove_rmap()
1311 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1319 __dec_node_page_state(page, NR_ANON_MAPPED); in page_remove_rmap()
1321 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1322 clear_page_mlock(page); in page_remove_rmap()
1324 if (PageTransCompound(page)) in page_remove_rmap()
1325 deferred_split_huge_page(compound_head(page)); in page_remove_rmap()
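
page_remove_rmap() itself is a dispatcher; page_remove_anon_compound_rmap() handles the PageDoubleMap case where PTE mappings survive the PMD unmap (lines 1273-1279 re-count the still-mapped subpages). A sketch of the dispatcher and its small-page tail:

void page_remove_rmap(struct page *page, bool compound)
{
        if (!PageAnon(page))
                return page_remove_file_rmap(page, compound);

        if (compound)
                return page_remove_anon_compound_rmap(page);

        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
                return;

        __dec_node_page_state(page, NR_ANON_MAPPED);

        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);

        /* a partially unmapped THP becomes a candidate for splitting */
        if (PageTransCompound(page))
                deferred_split_huge_page(compound_head(page));
}
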
1341 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1346 .page = page, in try_to_unmap_one()
1351 struct page *subpage; in try_to_unmap_one()
1361 is_zone_device_page(page) && !is_device_private_page(page)) in try_to_unmap_one()
1366 flags & TTU_SPLIT_FREEZE, page); in try_to_unmap_one()
1379 min(vma->vm_end, address + page_size(page))); in try_to_unmap_one()
1380 if (PageHuge(page)) { in try_to_unmap_one()
1394 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in try_to_unmap_one()
1396 set_pmd_migration_entry(&pvmw, page); in try_to_unmap_one()
1409 if (!PageTransCompound(page)) { in try_to_unmap_one()
1414 mlock_vma_page(page); in try_to_unmap_one()
1425 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
1427 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1430 if (PageHuge(page)) { in try_to_unmap_one()
1460 is_zone_device_page(page)) { in try_to_unmap_one()
1471 entry = make_migration_entry(page, 0); in try_to_unmap_one()
1487 subpage = page; in try_to_unmap_one()
1520 set_page_dirty(page); in try_to_unmap_one()
1525 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1527 if (PageHuge(page)) { in try_to_unmap_one()
1528 hugetlb_count_sub(compound_nr(page), mm); in try_to_unmap_one()
1533 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1548 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1579 } else if (PageAnon(page)) { in try_to_unmap_one()
1586 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { in try_to_unmap_one()
1597 if (!PageSwapBacked(page)) { in try_to_unmap_one()
1598 if (!PageDirty(page)) { in try_to_unmap_one()
1611 SetPageSwapBacked(page); in try_to_unmap_one()
1655 dec_mm_counter(mm, mm_counter_file(page)); in try_to_unmap_one()
1665 page_remove_rmap(subpage, PageHuge(page)); in try_to_unmap_one()
1666 put_page(page); in try_to_unmap_one()
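
try_to_unmap_one() is by far the largest callback here, and the listed lines trace its shape: a page_vma_mapped_walk over every PTE/PMD mapping the page in this VMA, computing the exact subpage for THPs (line 1427), then replacing the PTE with a migration entry, a swap entry, or nothing, depending on PageAnon() and the TTU_* flags. A heavily abbreviated skeleton (mmu-notifier ranges, hugetlb, device-private pages, mlock handling and all error unwinding are omitted; do not read this as the full control flow):

static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                             unsigned long address, void *arg)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
        };
        enum ttu_flags flags = (enum ttu_flags)(unsigned long)arg;
        struct page *subpage;
        pte_t pteval;
        bool ret = true;

        while (page_vma_mapped_walk(&pvmw)) {
                /* a THP may be mapped by ptes: act on the exact subpage */
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);

                flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
                pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);

                if (pte_dirty(pteval))
                        set_page_dirty(page);

                /*
                 * ... here the real function installs a migration entry
                 * (TTU_MIGRATION), a swap entry (anon pages), or simply
                 * accounts the unmap via dec_mm_counter(vma->vm_mm,
                 * mm_counter(page)) for file pages ...
                 */

                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
        }
        return ret;
}
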
1693 static int page_mapcount_is_zero(struct page *page) in page_mapcount_is_zero() argument
1695 return !total_mapcount(page); in page_mapcount_is_zero()
1708 bool try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1726 && !PageKsm(page) && PageAnon(page)) in try_to_unmap()
1730 rmap_walk_locked(page, &rwc); in try_to_unmap()
1732 rmap_walk(page, &rwc); in try_to_unmap()
1734 return !page_mapcount(page) ? true : false; in try_to_unmap()
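
try_to_unmap() packs the TTU_* flags into rwc.arg, uses page_mapcount_is_zero() as the early-exit .done test, and honours TTU_RMAP_LOCKED for callers that already hold the rmap lock (the split_huge_page path). Sketch around the listed lines:

bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
        struct rmap_walk_control rwc = {
                .rmap_one = try_to_unmap_one,
                .arg = (void *)flags,
                .done = page_mapcount_is_zero,
                .anon_lock = page_lock_anon_vma_read,
        };

        /*
         * Migration skips exec's temporary stack VMAs rather than
         * taking extra locks; see the invalid_migration_vma filter.
         */
        if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
            && !PageKsm(page) && PageAnon(page))
                rwc.invalid_vma = invalid_migration_vma;

        if (flags & TTU_RMAP_LOCKED)
                rmap_walk_locked(page, &rwc);
        else
                rmap_walk(page, &rwc);

        return !page_mapcount(page) ? true : false;
}
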
1737 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1739 return !page_mapped(page); in page_not_mapped()
1751 void try_to_munlock(struct page *page) in try_to_munlock() argument
1761 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in try_to_munlock()
1762 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in try_to_munlock()
1764 rmap_walk(page, &rwc); in try_to_munlock()
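
try_to_munlock() reuses try_to_unmap_one() with TTU_MUNLOCK, which unmaps nothing: it only walks the VMAs to see whether some VM_LOCKED mapping still exists (mlock_vma_page() at 1414 re-mlocks the page if so), with page_not_mapped() as the .done test. Sketch:

void try_to_munlock(struct page *page)
{
        struct rmap_walk_control rwc = {
                .rmap_one = try_to_unmap_one,
                .arg = (void *)TTU_MUNLOCK,
                .done = page_not_mapped,
                .anon_lock = page_lock_anon_vma_read,
        };

        VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
        VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

        rmap_walk(page, &rwc);
}
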
1776 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
1782 return rwc->anon_lock(page); in rmap_walk_anon_lock()
1790 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
1812 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_anon() argument
1820 anon_vma = page_anon_vma(page); in rmap_walk_anon()
1822 VM_BUG_ON_PAGE(!anon_vma, page); in rmap_walk_anon()
1824 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
1829 pgoff_start = page_to_pgoff(page); in rmap_walk_anon()
1830 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; in rmap_walk_anon()
1834 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1841 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1843 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
1864 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_file() argument
1867 struct address_space *mapping = page_mapping(page); in rmap_walk_file()
1877 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
1882 pgoff_start = page_to_pgoff(page); in rmap_walk_file()
1883 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; in rmap_walk_file()
1888 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1895 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
1897 if (rwc->done && rwc->done(page)) in rmap_walk_file()
1906 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
1908 if (unlikely(PageKsm(page))) in rmap_walk()
1909 rmap_walk_ksm(page, rwc); in rmap_walk()
1910 else if (PageAnon(page)) in rmap_walk()
1911 rmap_walk_anon(page, rwc, false); in rmap_walk()
1913 rmap_walk_file(page, rwc, false); in rmap_walk()
1917 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
1920 VM_BUG_ON_PAGE(PageKsm(page), page); in rmap_walk_locked()
1921 if (PageAnon(page)) in rmap_walk_locked()
1922 rmap_walk_anon(page, rwc, true); in rmap_walk_locked()
1924 rmap_walk_file(page, rwc, true); in rmap_walk_locked()
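
Both walkers share one shape: compute the pgoff range the page spans (one pgoff per subpage for THPs, lines 1829-1830 and 1882-1883), look up candidate VMAs in an interval tree (anon_vma->rb_root or mapping->i_mmap), and call rwc->rmap_one() on each until it or rwc->done() stops the walk. The top-level dispatch, with KSM handled only in the unlocked variant:

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
        if (unlikely(PageKsm(page)))
                rmap_walk_ksm(page, rwc);
        else if (PageAnon(page))
                rmap_walk_anon(page, rwc, false);
        else
                rmap_walk_file(page, rwc, false);
}

/* like rmap_walk, but the caller already holds the relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
        /* no KSM support here */
        VM_BUG_ON_PAGE(PageKsm(page), page);
        if (PageAnon(page))
                rmap_walk_anon(page, rwc, true);
        else
                rmap_walk_file(page, rwc, true);
}
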
1933 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
1939 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
1942 first = atomic_inc_and_test(compound_mapcount_ptr(page)); in hugepage_add_anon_rmap()
1944 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1947 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
1951 atomic_set(compound_mapcount_ptr(page), 0); in hugepage_add_new_anon_rmap()
1952 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()
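
The hugetlb variants touch only the compound mapcount: hugepage_add_anon_rmap() uses the same inc-and-test "first" pattern as do_page_add_anon_rmap(), and hugepage_add_new_anon_rmap() the same atomic_set-to-0 pattern as page_add_new_anon_rmap(), in both cases skipping all per-subpage and NR_* accounting. Sketch:

void hugepage_add_anon_rmap(struct page *page,
                            struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        int first;

        BUG_ON(!PageLocked(page));
        BUG_ON(!anon_vma);
        /* address might be in next vma when migration races vma_adjust */
        first = atomic_inc_and_test(compound_mapcount_ptr(page));
        if (first)
                __page_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
{
        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        atomic_set(compound_mapcount_ptr(page), 0);
        __page_set_anon_rmap(page, vma, address, 1);
}
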