Lines Matching full:page

10  * Provides methods for unmapping each kind of mapped page:
26 * page->flags PG_locked (lock_page) * (see hugetlbfs below)
29 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
51 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
52 * page->flags PG_locked (lock_page)
273 * searches where page is mapped.
463 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
467 * that might have been relevant to this page.
469 * The page might have been remapped to a different anon_vma or the anon_vma
474 * ensure that any anon_vma obtained from the page will still be valid for as
478 * chain and verify that the page in question is indeed mapped in it
482 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
486 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
492 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
495 if (!page_mapped(page)) in page_get_anon_vma()
505 * If this page is still mapped, then its anon_vma cannot have been in page_get_anon_vma()
511 if (!page_mapped(page)) { in page_get_anon_vma()
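
A minimal caller sketch for page_get_anon_vma() (not taken from rmap.c; the example_* name is illustrative): take a reference so the anon_vma stays valid, optionally lock it to exclude concurrent rmap walkers, and drop the reference when done.

    #include <linux/mm.h>
    #include <linux/rmap.h>

    static void example_hold_anon_vma(struct page *page)
    {
            struct anon_vma *anon_vma = page_get_anon_vma(page);

            if (!anon_vma)
                    return;         /* not anon, or already fully unmapped */

            anon_vma_lock_write(anon_vma);  /* optional: exclude rmap walkers */
            /* ... operate on the page's reverse mappings ... */
            anon_vma_unlock_write(anon_vma);

            put_anon_vma(anon_vma);         /* pairs with page_get_anon_vma() */
    }
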
529 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
536 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
539 if (!page_mapped(page)) in page_lock_anon_vma_read()
546 * If the page is still mapped, then this anon_vma is still in page_lock_anon_vma_read()
550 if (!page_mapped(page)) { in page_lock_anon_vma_read()
563 if (!page_mapped(page)) { in page_lock_anon_vma_read()
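
By contrast, page_lock_anon_vma_read() returns the anon_vma already read-locked. A hedged sketch of that pairing (the example_* name is illustrative):

    static void example_read_lock_anon_vma(struct page *page)
    {
            struct anon_vma *anon_vma = page_lock_anon_vma_read(page);

            if (!anon_vma)
                    return;         /* page was unmapped in the meantime */

            /* ... short, read-only scan of the anon_vma interval tree ... */

            page_unlock_anon_vma_read(anon_vma);    /* pairs with the lock above */
    }
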
600 * before any IO is initiated on the page to prevent lost writes. Similarly,
641 * before the page is queued for IO. in set_tlb_ubc_flush_pending()
670 * the page and flushing the page. If this race occurs, it potentially allows
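
The lines above belong to the batched TLB-flush machinery (set_tlb_ubc_flush_pending() and friends). A rough sketch of the pairing a batching caller is expected to follow, assuming the mm-internal helpers declared in mm/internal.h (so this only compiles inside mm/); the example_* wrapper is illustrative:

    static void example_unmap_batch(struct page *page)
    {
            try_to_unmap(page, TTU_BATCH_FLUSH);    /* TLB flush may be deferred */
            /* ... more pages of the batch may be unmapped here ... */
            try_to_unmap_flush_dirty();     /* must run before I/O is started */
    }
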
706 * At what user virtual address is page expected in vma?
707 * Caller should check the page is actually part of the vma.
709 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
711 if (PageAnon(page)) { in page_address_in_vma()
712 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
722 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { in page_address_in_vma()
726 return vma_address(page, vma); in page_address_in_vma()
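
A hedged usage sketch for page_address_in_vma() (the example_* name is illustrative): the return value is the expected user virtual address, or -EFAULT when the page is not part of the vma.

    static unsigned long example_addr_in_vma(struct page *page,
                                             struct vm_area_struct *vma)
    {
            unsigned long address = page_address_in_vma(page, vma);

            if (address == -EFAULT)
                    return 0;       /* page is not mapped by this vma */
            return address;
    }
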
772 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
777 .page = page, in page_referenced_one()
798 * If the page has been used in another mapping, in page_referenced_one()
801 * PG_referenced or activated the page. in page_referenced_one()
811 /* unexpected pmd-mapped page? */ in page_referenced_one()
819 clear_page_idle(page); in page_referenced_one()
820 if (test_and_clear_page_young(page)) in page_referenced_one()
846 * page_referenced - test if the page was referenced
847 * @page: the page to test
848 * @is_locked: caller holds lock on the page
850 * @vm_flags: collect encountered vma->vm_flags that actually referenced the page
852 * Quick test_and_clear_referenced for all mappings to a page,
853 * returns the number of ptes which referenced the page.
855 int page_referenced(struct page *page, in page_referenced() argument
862 .mapcount = total_mapcount(page), in page_referenced()
875 if (!page_rmapping(page)) in page_referenced()
878 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
879 we_locked = trylock_page(page); in page_referenced()
893 rmap_walk(page, &rwc); in page_referenced()
897 unlock_page(page); in page_referenced()
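
A reclaim-style sketch of the page_referenced() call (not vmscan's actual policy; the example_* name and the decision shown are illustrative): the return value counts referencing ptes, and *vm_flags reports properties of the vmas that referenced the page.

    static bool example_was_referenced(struct page *page)
    {
            unsigned long vm_flags;
            int referenced;

            referenced = page_referenced(page, PageLocked(page), NULL, &vm_flags);
            if (vm_flags & VM_LOCKED)
                    return true;    /* mlocked somewhere: keep the page */
            return referenced > 0;
    }
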
902 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
906 .page = page, in page_mkclean_one()
916 * the page cannot be freed from this function. in page_mkclean_one()
920 vma_address_end(page, vma)); in page_mkclean_one()
948 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
955 /* unexpected pmd-mapped page? */ in page_mkclean_one()
962 * downgrading page table protection, not changing it to point in page_mkclean_one()
963 * to a new page. in page_mkclean_one()
984 int page_mkclean(struct page *page) in page_mkclean() argument
994 BUG_ON(!PageLocked(page)); in page_mkclean()
996 if (!page_mapped(page)) in page_mkclean()
999 mapping = page_mapping(page); in page_mkclean()
1003 rmap_walk(page, &rwc); in page_mkclean()
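
The writeback-side pattern for page_mkclean(), as a hedged sketch (it mirrors the clear-dirty-for-I/O idiom; the example_* wrapper is illustrative): if any pte was dirty or writable, the page itself must be re-marked dirty before that pte state is lost.

    static void example_mkclean(struct page *page)
    {
            VM_BUG_ON_PAGE(!PageLocked(page), page);

            if (page_mkclean(page))
                    set_page_dirty(page);
    }
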
1010 * page_move_anon_rmap - move a page to our anon_vma
1011 * @page: the page to move to our anon_vma
1012 * @vma: the vma the page belongs to
1014 * When a page belongs exclusively to one process after a COW event,
1015 * that page can be moved into the anon_vma that belongs to just that
1019 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1023 page = compound_head(page); in page_move_anon_rmap()
1025 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1034 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
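
A hedged sketch of the COW-reuse case the comment describes (the example_* wrapper is illustrative; the exclusivity check itself is the fault handler's job and is elided here):

    static void example_reuse_cow_page(struct page *page,
                                       struct vm_area_struct *vma)
    {
            /* caller holds the page lock and has proven sole ownership */
            page_move_anon_rmap(page, vma);
    }
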
1039 * @page: Page or Hugepage to add to rmap
1040 * @vma: VM area to add page to.
1042 * @exclusive: the page is exclusively owned by the current process
1044 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1051 if (PageAnon(page)) in __page_set_anon_rmap()
1055 * If the page isn't exclusively mapped into this vma, in __page_set_anon_rmap()
1057 * page mapping! in __page_set_anon_rmap()
1063 * page_idle does a lockless/optimistic rmap scan on page->mapping. in __page_set_anon_rmap()
1069 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in __page_set_anon_rmap()
1070 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1075 * @page: the page to add the mapping to
1079 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1083 * The page's anon-rmap details (mapping and index) are guaranteed to in __page_check_anon_rmap()
1087 * always holds the page locked. in __page_check_anon_rmap()
1093 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); in __page_check_anon_rmap()
1094 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1095 page); in __page_check_anon_rmap()
1099 * page_add_anon_rmap - add pte mapping to an anonymous page
1100 * @page: the page to add the mapping to
1103 * @compound: charge the page as compound or small page
1105 * The caller needs to hold the pte lock, and the page must be locked in
1110 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1113 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1121 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1127 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1128 lock_page_memcg(page); in do_page_add_anon_rmap()
1130 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1134 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1135 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in do_page_add_anon_rmap()
1136 mapcount = compound_mapcount_ptr(page); in do_page_add_anon_rmap()
1139 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1143 int nr = compound ? thp_nr_pages(page) : 1; in do_page_add_anon_rmap()
1151 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); in do_page_add_anon_rmap()
1152 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1155 if (unlikely(PageKsm(page))) { in do_page_add_anon_rmap()
1156 unlock_page_memcg(page); in do_page_add_anon_rmap()
1162 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1165 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
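
A hedged sketch of the swap-in style use of do_page_add_anon_rmap() (the example_* wrapper is illustrative): the page already had rmap state before it was swapped out, so the non-"new" variant is used, and RMAP_EXCLUSIVE says the page is owned by this process alone.

    static void example_swapin_rmap(struct page *page,
                                    struct vm_area_struct *vma,
                                    unsigned long address, bool exclusive)
    {
            do_page_add_anon_rmap(page, vma, address,
                                  exclusive ? RMAP_EXCLUSIVE : 0);
    }
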
1169 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1170 * @page: the page to add the mapping to
1173 * @compound: charge the page as compound or small page
1177 * Page does not have to be locked.
1179 void page_add_new_anon_rmap(struct page *page, in page_add_new_anon_rmap() argument
1182 int nr = compound ? thp_nr_pages(page) : 1; in page_add_new_anon_rmap()
1185 __SetPageSwapBacked(page); in page_add_new_anon_rmap()
1187 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in page_add_new_anon_rmap()
1189 atomic_set(compound_mapcount_ptr(page), 0); in page_add_new_anon_rmap()
1190 if (hpage_pincount_available(page)) in page_add_new_anon_rmap()
1191 atomic_set(compound_pincount_ptr(page), 0); in page_add_new_anon_rmap()
1193 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); in page_add_new_anon_rmap()
1196 VM_BUG_ON_PAGE(PageTransCompound(page), page); in page_add_new_anon_rmap()
1198 atomic_set(&page->_mapcount, 0); in page_add_new_anon_rmap()
1200 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in page_add_new_anon_rmap()
1201 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
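
The anonymous-fault pattern around page_add_new_anon_rmap(), as a hedged sketch (roughly the do_anonymous_page() shape; the example_* wrapper and the pte handling shown are illustrative, and <linux/swap.h> is needed for the LRU helper): the page is brand new and not yet visible to anyone else, so it need not be locked.

    static void example_map_new_anon(struct vm_fault *vmf, struct page *page,
                                     pte_t entry)
    {
            struct vm_area_struct *vma = vmf->vma;

            inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
            page_add_new_anon_rmap(page, vma, vmf->address, false);
            lru_cache_add_inactive_or_unevictable(page, vma);
            set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
    }
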
1205 * page_add_file_rmap - add pte mapping to a file page
1206 * @page: the page to add the mapping to
1207 * @compound: charge the page as compound or small page
1211 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1215 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1216 lock_page_memcg(page); in page_add_file_rmap()
1217 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1218 int nr_pages = thp_nr_pages(page); in page_add_file_rmap()
1221 if (atomic_inc_and_test(&page[i]._mapcount)) in page_add_file_rmap()
1224 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) in page_add_file_rmap()
1226 if (PageSwapBacked(page)) in page_add_file_rmap()
1227 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, in page_add_file_rmap()
1230 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, in page_add_file_rmap()
1233 if (PageTransCompound(page) && page_mapping(page)) { in page_add_file_rmap()
1234 struct page *head = compound_head(page); in page_add_file_rmap()
1236 VM_WARN_ON_ONCE(!PageLocked(page)); in page_add_file_rmap()
1239 if (PageMlocked(page)) in page_add_file_rmap()
1242 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1245 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1247 unlock_page_memcg(page); in page_add_file_rmap()
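
The file-fault counterpart, as a hedged sketch (roughly the do_set_pte() shape; the example_* wrapper is illustrative): bump the file-page counter, add the pte-level rmap, then install the pte.

    static void example_map_file_page(struct vm_fault *vmf, struct page *page,
                                      pte_t entry)
    {
            struct vm_area_struct *vma = vmf->vma;

            inc_mm_counter(vma->vm_mm, mm_counter_file(page));
            page_add_file_rmap(page, false);
            set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
    }
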
1250 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1254 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1257 if (unlikely(PageHuge(page))) { in page_remove_file_rmap()
1259 atomic_dec(compound_mapcount_ptr(page)); in page_remove_file_rmap()
1263 /* page still mapped by someone else? */ in page_remove_file_rmap()
1264 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1265 int nr_pages = thp_nr_pages(page); in page_remove_file_rmap()
1268 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1271 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1273 if (PageSwapBacked(page)) in page_remove_file_rmap()
1274 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, in page_remove_file_rmap()
1277 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, in page_remove_file_rmap()
1280 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1289 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1291 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1292 clear_page_mlock(page); in page_remove_file_rmap()
1295 static void page_remove_anon_compound_rmap(struct page *page) in page_remove_anon_compound_rmap() argument
1299 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1303 if (unlikely(PageHuge(page))) in page_remove_anon_compound_rmap()
1309 __mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page)); in page_remove_anon_compound_rmap()
1311 if (TestClearPageDoubleMap(page)) { in page_remove_anon_compound_rmap()
1316 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_anon_compound_rmap()
1317 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1322 * Queue the page for deferred split if at least one small in page_remove_anon_compound_rmap()
1323 * page of the compound page is unmapped, but at least one in page_remove_anon_compound_rmap()
1324 * small page is still mapped. in page_remove_anon_compound_rmap()
1326 if (nr && nr < thp_nr_pages(page)) in page_remove_anon_compound_rmap()
1327 deferred_split_huge_page(page); in page_remove_anon_compound_rmap()
1329 nr = thp_nr_pages(page); in page_remove_anon_compound_rmap()
1332 if (unlikely(PageMlocked(page))) in page_remove_anon_compound_rmap()
1333 clear_page_mlock(page); in page_remove_anon_compound_rmap()
1336 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1340 * page_remove_rmap - take down pte mapping from a page
1341 * @page: page to remove mapping from
1342 * @compound: uncharge the page as compound or small page
1346 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1348 lock_page_memcg(page); in page_remove_rmap()
1350 if (!PageAnon(page)) { in page_remove_rmap()
1351 page_remove_file_rmap(page, compound); in page_remove_rmap()
1356 page_remove_anon_compound_rmap(page); in page_remove_rmap()
1360 /* page still mapped by someone else? */ in page_remove_rmap()
1361 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1369 __dec_lruvec_page_state(page, NR_ANON_MAPPED); in page_remove_rmap()
1371 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1372 clear_page_mlock(page); in page_remove_rmap()
1374 if (PageTransCompound(page)) in page_remove_rmap()
1375 deferred_split_huge_page(compound_head(page)); in page_remove_rmap()
1387 unlock_page_memcg(page); in page_remove_rmap()
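
The teardown side, as a hedged sketch (the example_* wrapper is illustrative; it mirrors the nuke-pte / move-dirty-bit / page_remove_rmap() / put_page() sequence visible in try_to_unmap_one() below):

    static void example_unmap_pte(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep,
                                  struct page *page)
    {
            /* called with the pte lock held */
            pte_t pteval = ptep_clear_flush(vma, address, ptep);

            if (pte_dirty(pteval))
                    set_page_dirty(page);   /* move the dirty bit to the page */

            page_remove_rmap(page, false);
            put_page(page);
    }
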
1393 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1398 .page = page, in try_to_unmap_one()
1403 struct page *subpage; in try_to_unmap_one()
1412 * if page table locking is skipped: use TTU_SYNC to wait for that. in try_to_unmap_one()
1418 split_huge_pmd_address(vma, address, false, page); in try_to_unmap_one()
1425 * Note that the page cannot be freed in this function, as the caller of in try_to_unmap_one()
1426 * try_to_unmap() must hold a reference on the page. in try_to_unmap_one()
1428 range.end = PageKsm(page) ? in try_to_unmap_one()
1429 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_unmap_one()
1432 if (PageHuge(page)) { in try_to_unmap_one()
1444 * If the page is mlock()d, we cannot swap it out. in try_to_unmap_one()
1454 if (!PageTransCompound(page) || (PageHead(page) && in try_to_unmap_one()
1455 !PageDoubleMap(page) && !PageAnon(page))) in try_to_unmap_one()
1456 mlock_vma_page(page); in try_to_unmap_one()
1463 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
1465 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1468 if (PageHuge(page) && !PageAnon(page)) { in try_to_unmap_one()
1478 * page. There is no way of knowing exactly in try_to_unmap_one()
1489 * The ref count of the PMD page was dropped in try_to_unmap_one()
1494 * unmap the actual page and drop map count in try_to_unmap_one()
1502 /* Nuke the page table entry. */ in try_to_unmap_one()
1507 * a remote CPU could still be writing to the page. in try_to_unmap_one()
1520 /* Move the dirty bit to the page. Now the pte is gone. */ in try_to_unmap_one()
1522 set_page_dirty(page); in try_to_unmap_one()
1527 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1529 if (PageHuge(page)) { in try_to_unmap_one()
1530 hugetlb_count_sub(compound_nr(page), mm); in try_to_unmap_one()
1535 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1541 * The guest indicated that the page content is of no in try_to_unmap_one()
1545 * page. When userfaultfd is active, we must not drop in try_to_unmap_one()
1546 * this page though, as its main user (postcopy in try_to_unmap_one()
1550 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1554 } else if (PageAnon(page)) { in try_to_unmap_one()
1561 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { in try_to_unmap_one()
1571 /* MADV_FREE page check */ in try_to_unmap_one()
1572 if (!PageSwapBacked(page)) { in try_to_unmap_one()
1573 if (!PageDirty(page)) { in try_to_unmap_one()
1582 * If the page was redirtied, it cannot be in try_to_unmap_one()
1583 * discarded. Remap the page to page table. in try_to_unmap_one()
1586 SetPageSwapBacked(page); in try_to_unmap_one()
1623 * This is a locked file-backed page, thus it cannot in try_to_unmap_one()
1624 * be removed from the page cache and replaced by a new in try_to_unmap_one()
1625 * page before mmu_notifier_invalidate_range_end, so no in try_to_unmap_one()
1626 * concurrent thread might update its page table to in try_to_unmap_one()
1627 * point at a new page while a device is still using this in try_to_unmap_one()
1628 * page. in try_to_unmap_one()
1632 dec_mm_counter(mm, mm_counter_file(page)); in try_to_unmap_one()
1637 * done above for all cases requiring it to happen under page in try_to_unmap_one()
1642 page_remove_rmap(subpage, PageHuge(page)); in try_to_unmap_one()
1643 put_page(page); in try_to_unmap_one()
1656 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1658 return !page_mapped(page); in page_not_mapped()
1662 * try_to_unmap - try to remove all page table mappings to a page
1663 * @page: the page to get unmapped
1666 * Tries to remove all the page table entries which are mapping this
1667 * page, used in the pageout path. Caller must hold the page lock.
1669 * It is the caller's responsibility to check if the page is still
1672 void try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1682 rmap_walk_locked(page, &rwc); in try_to_unmap()
1684 rmap_walk(page, &rwc); in try_to_unmap()
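
A hedged sketch of the pageout-side call (the example_* wrapper is illustrative): try_to_unmap() returns void, so the caller re-checks page_mapped() to learn whether every pte was actually removed.

    static bool example_unmap_for_reclaim(struct page *page)
    {
            if (page_mapped(page))
                    try_to_unmap(page, TTU_BATCH_FLUSH);

            return !page_mapped(page);      /* true if fully unmapped */
    }
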
1693 static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, in try_to_migrate_one() argument
1698 .page = page, in try_to_migrate_one()
1703 struct page *subpage; in try_to_migrate_one()
1712 * if page table locking is skipped: use TTU_SYNC to wait for that. in try_to_migrate_one()
1722 split_huge_pmd_address(vma, address, true, page); in try_to_migrate_one()
1729 * Note that the page cannot be freed in this function, as the caller of in try_to_migrate_one()
1730 * try_to_migrate() must hold a reference on the page. in try_to_migrate_one()
1732 range.end = PageKsm(page) ? in try_to_migrate_one()
1733 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_migrate_one()
1736 if (PageHuge(page)) { in try_to_migrate_one()
1750 VM_BUG_ON_PAGE(PageHuge(page) || in try_to_migrate_one()
1751 !PageTransCompound(page), page); in try_to_migrate_one()
1753 set_pmd_migration_entry(&pvmw, page); in try_to_migrate_one()
1759 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_migrate_one()
1761 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_migrate_one()
1764 if (PageHuge(page) && !PageAnon(page)) { in try_to_migrate_one()
1774 * page. There is no way of knowing exactly in try_to_migrate_one()
1785 * The ref count of the PMD page was dropped in try_to_migrate_one()
1790 * unmap the actual page and drop map count in try_to_migrate_one()
1798 /* Nuke the page table entry. */ in try_to_migrate_one()
1802 /* Move the dirty bit to the page. Now the pte is gone. */ in try_to_migrate_one()
1804 set_page_dirty(page); in try_to_migrate_one()
1809 if (is_zone_device_page(page)) { in try_to_migrate_one()
1814 * Store the pfn of the page in a special migration in try_to_migrate_one()
1819 page_to_pfn(page)); in try_to_migrate_one()
1823 * pteval maps a zone device page and is therefore in try_to_migrate_one()
1838 * migrated, just set it to page. This will need to be in try_to_migrate_one()
1842 subpage = page; in try_to_migrate_one()
1843 } else if (PageHWPoison(page)) { in try_to_migrate_one()
1845 if (PageHuge(page)) { in try_to_migrate_one()
1846 hugetlb_count_sub(compound_nr(page), mm); in try_to_migrate_one()
1851 dec_mm_counter(mm, mm_counter(page)); in try_to_migrate_one()
1857 * The guest indicated that the page content is of no in try_to_migrate_one()
1861 * page. When userfaultfd is active, we must not drop in try_to_migrate_one()
1862 * this page though, as its main user (postcopy in try_to_migrate_one()
1866 dec_mm_counter(mm, mm_counter(page)); in try_to_migrate_one()
1882 * Store the pfn of the page in a special migration in try_to_migrate_one()
1907 * done above for all cases requiring it to happen under page in try_to_migrate_one()
1912 page_remove_rmap(subpage, PageHuge(page)); in try_to_migrate_one()
1913 put_page(page); in try_to_migrate_one()
1922 * try_to_migrate - try to replace all page table mappings with swap entries
1923 * @page: the page to replace page table entries for
1926 * Tries to remove all the page table entries which are mapping this page and
1927 * replace them with special swap entries. Caller must hold the page lock.
1929 void try_to_migrate(struct page *page, enum ttu_flags flags) in try_to_migrate() argument
1946 if (is_zone_device_page(page) && !is_device_private_page(page)) in try_to_migrate()
1952 * page tables leading to a race where migration cannot in try_to_migrate()
1957 if (!PageKsm(page) && PageAnon(page)) in try_to_migrate()
1961 rmap_walk_locked(page, &rwc); in try_to_migrate()
1963 rmap_walk(page, &rwc); in try_to_migrate()
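
A hedged sketch of the migration-side call (the example_* wrapper is illustrative): replace the ptes with migration entries, then let the caller copy the data only once the page is really unmapped.

    static bool example_freeze_for_migration(struct page *page)
    {
            if (page_mapped(page))
                    try_to_migrate(page, 0);

            return !page_mapped(page);      /* safe to move the page if true */
    }
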
1967 * Walks the vmas mapping a page and mlocks the page if any locked vmas are
1968 * found. Once one is found the page is locked and the scan can be terminated.
1970 static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, in page_mlock_one() argument
1974 .page = page, in page_mlock_one()
1996 mlock_vma_page(page); in page_mlock_one()
1998 * No need to scan further once the page is marked in page_mlock_one()
2010 * page_mlock - try to mlock a page
2011 * @page: the page to be mlocked
2013 * Called from munlock code. Checks all of the VMAs mapping the page and mlocks
2014 * the page if any are found. The page will be returned with PG_mlocked cleared
2017 void page_mlock(struct page *page) in page_mlock() argument
2026 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in page_mlock()
2027 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in page_mlock()
2030 if (PageTransCompound(page) && PageAnon(page)) in page_mlock()
2033 rmap_walk(page, &rwc); in page_mlock()
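
A hedged sketch of the munlock-side pattern (the example_* wrapper is illustrative and accounting is elided; per the VM_BUG_ONs above, the page must be locked and off the LRU): clear PG_mlocked first, then let page_mlock() re-set it if another VM_LOCKED vma still maps the page.

    static void example_munlock(struct page *page)
    {
            if (TestClearPageMlocked(page))
                    page_mlock(page);
    }
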
2044 static bool page_make_device_exclusive_one(struct page *page, in page_make_device_exclusive_one() argument
2049 .page = page, in page_make_device_exclusive_one()
2055 struct page *subpage; in page_make_device_exclusive_one()
2063 address + page_size(page)), args->owner); in page_make_device_exclusive_one()
2068 VM_BUG_ON_PAGE(!pvmw.pte, page); in page_make_device_exclusive_one()
2076 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in page_make_device_exclusive_one()
2079 /* Nuke the page table entry. */ in page_make_device_exclusive_one()
2083 /* Move the dirty bit to the page. Now the pte is gone. */ in page_make_device_exclusive_one()
2085 set_page_dirty(page); in page_make_device_exclusive_one()
2088 * Check that our target page is still mapped at the expected in page_make_device_exclusive_one()
2096 * Store the pfn of the page in a special migration in page_make_device_exclusive_one()
2115 * There is a reference on the page for the swap entry which has in page_make_device_exclusive_one()
2127 * page_make_device_exclusive - mark the page exclusively owned by a device
2128 * @page: the page to replace page table entries for
2129 * @mm: the mm_struct where the page is expected to be mapped
2130 * @address: address where the page is expected to be mapped
2133 * Tries to remove all the page table entries which are mapping this page and
2135 * exclusive access to the page. Caller must hold the page lock.
2137 * Returns false if the page is still mapped, or if it could not be unmapped
2140 static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm, in page_make_device_exclusive() argument
2161 if (!PageAnon(page) || PageTail(page)) in page_make_device_exclusive()
2164 rmap_walk(page, &rwc); in page_make_device_exclusive()
2166 return args.valid && !page_mapcount(page); in page_make_device_exclusive()
2177 * Returns: number of pages found in the range by GUP. A page is marked for
2178 * exclusive access only if the page pointer is non-NULL.
2180 * This function finds ptes mapping page(s) to the given address range, locks
2187 * programming is complete it should drop the page lock and reference after
2188 * which point CPU access to the page will revoke the exclusive access.
2191 unsigned long end, struct page **pages, in make_device_exclusive_range()
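
A hedged driver-side sketch for make_device_exclusive_range() (the example_* wrapper is illustrative; "owner" is the same cookie the driver filters on in its MMU-notifier callback). A NULL entry in pages[] means that page could not be made exclusive.

    static bool example_grab_exclusive(struct mm_struct *mm,
                                       unsigned long addr, void *owner)
    {
            struct page *page = NULL;
            int npages;

            npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
                                                 &page, owner);
            if (npages != 1 || !page)
                    return false;

            /* ... program the device's mapping of the page here ... */

            unlock_page(page);      /* CPU access will now revoke exclusivity */
            put_page(page);
            return true;
    }
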
2231 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
2237 return rwc->anon_lock(page); in rmap_walk_anon_lock()
2245 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
2254 * rmap_walk_anon - do something to an anonymous page using the object-based
2256 * @page: the page to be handled
2259 * Find all the mappings of a page using the mapping pointer and the vma chains
2263 * where the page was found will be held for write. So, we won't recheck
2267 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_anon() argument
2275 anon_vma = page_anon_vma(page); in rmap_walk_anon()
2277 VM_BUG_ON_PAGE(!anon_vma, page); in rmap_walk_anon()
2279 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
2284 pgoff_start = page_to_pgoff(page); in rmap_walk_anon()
2285 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_anon()
2289 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
2297 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
2299 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
2308 * rmap_walk_file - do something to a file page using the object-based rmap method
2309 * @page: the page to be handled
2312 * Find all the mappings of a page using the mapping pointer and the vma chains
2316 * where the page was found will be held for write. So, we won't recheck
2320 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_file() argument
2323 struct address_space *mapping = page_mapping(page); in rmap_walk_file()
2328 * The page lock not only makes sure that page->mapping cannot in rmap_walk_file()
2333 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
2338 pgoff_start = page_to_pgoff(page); in rmap_walk_file()
2339 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_file()
2344 unsigned long address = vma_address(page, vma); in rmap_walk_file()
2352 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
2354 if (rwc->done && rwc->done(page)) in rmap_walk_file()
2363 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
2365 if (unlikely(PageKsm(page))) in rmap_walk()
2366 rmap_walk_ksm(page, rwc); in rmap_walk()
2367 else if (PageAnon(page)) in rmap_walk()
2368 rmap_walk_anon(page, rwc, false); in rmap_walk()
2370 rmap_walk_file(page, rwc, false); in rmap_walk()
2374 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2377 VM_BUG_ON_PAGE(PageKsm(page), page); in rmap_walk_locked()
2378 if (PageAnon(page)) in rmap_walk_locked()
2379 rmap_walk_anon(page, rwc, true); in rmap_walk_locked()
2381 rmap_walk_file(page, rwc, true); in rmap_walk_locked()
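
A hedged sketch of a custom walk built on struct rmap_walk_control (the example_* names are illustrative): rmap_one returning true means "keep walking"; file-backed pages must be locked by the caller, as the VM_BUG_ON in rmap_walk_file() above insists.

    static bool example_count_one(struct page *page, struct vm_area_struct *vma,
                                  unsigned long address, void *arg)
    {
            int *count = arg;

            (*count)++;
            return true;    /* continue the walk */
    }

    static int example_count_mappings(struct page *page)
    {
            int count = 0;
            struct rmap_walk_control rwc = {
                    .rmap_one = example_count_one,
                    .arg = &count,
            };

            rmap_walk(page, &rwc);
            return count;
    }
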
2390 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
2396 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
2399 first = atomic_inc_and_test(compound_mapcount_ptr(page)); in hugepage_add_anon_rmap()
2401 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
2404 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
2408 atomic_set(compound_mapcount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2409 if (hpage_pincount_available(page)) in hugepage_add_new_anon_rmap()
2410 atomic_set(compound_pincount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2412 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()
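
A hedged sketch of the hugetlb fault-side choice (the example_* wrapper is illustrative): a freshly allocated huge page gets the "new" variant; a huge page that is already anon-mapped elsewhere gets the ordinary one.

    static void example_hugetlb_anon_rmap(struct page *page,
                                          struct vm_area_struct *vma,
                                          unsigned long haddr, bool new_page)
    {
            if (new_page)
                    hugepage_add_new_anon_rmap(page, vma, haddr);
            else
                    hugepage_add_anon_rmap(page, vma, haddr);
    }
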