Lines matching "page" in mm/migrate.c, grouped by function ("..." marks elided context).
From the file header:

    /*
     * Page migration was first developed in the context of the memory hotplug
     * project. ...
     */
    int isolate_movable_page(struct page *page, isolate_mode_t mode)
    {
        struct address_space *mapping;

        /*
         * ...
         * In case we 'win' a race for a movable page being freed under us and
         * raise its refcount, ... the put_page() at the end of this block ...
         * release this page, thus avoiding a nasty leakage.
         */
        if (unlikely(!get_page_unless_zero(page)))
            goto out;

        /*
         * Check PageMovable before holding a PG_lock because page's owner
         * assumes anybody doesn't touch PG_lock of newly allocated page
         * so unconditionally grabbing the lock ruins page's owner side.
         */
        if (unlikely(!__PageMovable(page)))
            goto out_putpage;

        /*
         * ... compaction threads can race against page migration functions
         * as well as race against the releasing a page.
         *
         * In order to avoid having an already isolated movable page
         * being (wrongly) re-isolated while it is under migration,
         * ... lets be sure we have the page lock
         * before proceeding with the movable page isolation steps.
         */
        if (unlikely(!trylock_page(page)))
            goto out_putpage;

        if (!PageMovable(page) || PageIsolated(page))
            goto out_no_isolated;

        mapping = page_mapping(page);
        VM_BUG_ON_PAGE(!mapping, page);

        if (!mapping->a_ops->isolate_page(page, mode))
            goto out_no_isolated;

        /* Driver shouldn't use PG_isolated bit of page->flags */
        WARN_ON_ONCE(PageIsolated(page));
        __SetPageIsolated(page);
        unlock_page(page);

        return 0;

    out_no_isolated:
        unlock_page(page);
    out_putpage:
        put_page(page);
    out:
        return -EBUSY;
    }
    static void putback_movable_page(struct page *page)
    {
        struct address_space *mapping;

        mapping = page_mapping(page);
        mapping->a_ops->putback_page(page);
        __ClearPageIsolated(page);
    }
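Both helpers dispatch through the owning driver's address_space_operations. A minimal sketch of the driver side, assuming a hypothetical my_ driver (only the three hooks and __SetPageMovable() are the real interface exercised by the code above):

    static bool my_isolate_page(struct page *page, isolate_mode_t mode)
    {
        /* detach the page from the driver's own structures */
        return true;    /* tell isolate_movable_page() isolation worked */
    }

    static void my_putback_page(struct page *page)
    {
        /* reinsert the page into the driver's structures */
    }

    static int my_migratepage(struct address_space *mapping,
                  struct page *newpage, struct page *page,
                  enum migrate_mode mode)
    {
        /* copy contents and repoint driver metadata to newpage */
        return MIGRATEPAGE_SUCCESS;
    }

    static const struct address_space_operations my_aops = {
        .isolate_page = my_isolate_page,
        .putback_page = my_putback_page,
        .migratepage  = my_migratepage,
    };

A page enters this path once the driver marks it with __SetPageMovable(page, mapping); zsmalloc and virtio-balloon are in-tree users of this non-LRU movable page scheme.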
    /*
     * Put previously isolated pages back onto the appropriate lists
     * from where they were once taken off for compaction/migration.
     * ... built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
     * ...
     */
    void putback_movable_pages(struct list_head *l)
    {
        struct page *page;
        struct page *page2;

        list_for_each_entry_safe(page, page2, l, lru) {
            if (unlikely(PageHuge(page))) {
                putback_active_hugepage(page);
                continue;
            }
            list_del(&page->lru);
            /*
             * We isolated non-lru movable page so here we can use
             * __PageMovable because LRU page's mapping cannot have
             * PAGE_MAPPING_MOVABLE.
             */
            if (unlikely(__PageMovable(page))) {
                VM_BUG_ON_PAGE(!PageIsolated(page), page);
                lock_page(page);
                if (PageMovable(page))
                    putback_movable_page(page);
                else
                    __ClearPageIsolated(page);
                unlock_page(page);
                put_page(page);
            } else {
                mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                        page_is_file_lru(page), -thp_nr_pages(page));
                putback_lru_page(page);
            }
        }
    }
    static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                     unsigned long addr, void *old)
    {
        struct page_vma_mapped_walk pvmw = {
            .page = old,
            .vma = vma,
            .address = addr,
            .flags = PVMW_SYNC | PVMW_MIGRATION,
        };
        struct page *new;
        ...
        VM_BUG_ON_PAGE(PageTail(page), page);
        while (page_vma_mapped_walk(&pvmw)) {
            if (PageKsm(page))
                new = page;
            else
                new = page - pvmw.page->index +
                    linear_page_index(vma, pvmw.address);
            ...
            VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
            ...
            if (PageTransHuge(page) && PageMlocked(page))
                clear_page_mlock(page);
            ...
        }
        ...
    }
    /*
     * Get rid of all migration entries and replace them by
     * references to the indicated page.
     */
    void remove_migration_ptes(struct page *old, struct page *new, bool locked)
    /*
     * Something used the pte of a page under migration. We need to
     * get to the page and wait until migration is finished.
     * ...
     */

In __migration_entry_wait():

        struct page *page;
        ...
        page = pfn_swap_entry_to_page(entry);
        page = compound_head(page);

        /*
         * Once page cache replacement of page migration started, page_count
         * is zero; but we must not call put_and_wait_on_page_locked() without
         * a ref. Use get_page_unless_zero(), and just fault again if it fails.
         */
        if (!get_page_unless_zero(page))
            goto out;
        ...
        put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
In pmd_migration_entry_wait():

        struct page *page;
        ...
        page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
        if (!get_page_unless_zero(page))
            goto unlock;
        ...
        put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
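These waiters are reached from the page-fault path: when a fault hits a pte that is really a migration entry, the handler blocks until migration completes and then retries the fault. Roughly how mm/memory.c's do_swap_page() gets there (abridged from the kernel, not from this file):

        entry = pte_to_swp_entry(vmf->orig_pte);
        if (unlikely(non_swap_entry(entry))) {
            if (is_migration_entry(entry))
                /* ends up in __migration_entry_wait() above */
                migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
            ...
        }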
    static int expected_page_refs(struct address_space *mapping, struct page *page)
    {
        int expected_count = 1;

        /* device private pages carry an extra ZONE_DEVICE reference */
        expected_count += is_device_private_page(page);
        if (mapping)
            expected_count += thp_nr_pages(page) + page_has_private(page);

        return expected_count;
    }
    /*
     * Replace the page in the mapping.
     * ...
     */
    int migrate_page_move_mapping(struct address_space *mapping,
            struct page *newpage, struct page *page, int extra_count)
    {
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = expected_page_refs(mapping, page) + extra_count;
        int nr = thp_nr_pages(page);

        if (!mapping) {
            /* Anonymous page without mapping */
            if (page_count(page) != expected_count)
                return -EAGAIN;

            /* No turning back from here */
            newpage->index = page->index;
            newpage->mapping = page->mapping;
            if (PageSwapBacked(page))
                __SetPageSwapBacked(newpage);

            return MIGRATEPAGE_SUCCESS;
        }

        oldzone = page_zone(page);
        newzone = page_zone(newpage);

        xas_lock_irq(&xas);
        if (page_count(page) != expected_count || xas_load(&xas) != page) {
            xas_unlock_irq(&xas);
            return -EAGAIN;
        }

        if (!page_ref_freeze(page, expected_count)) {
            xas_unlock_irq(&xas);
            return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page:
         * no turning back from here.
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        ...
        if (PageSwapBacked(page)) {
            __SetPageSwapBacked(newpage);
            if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
            }
        } else {
            VM_BUG_ON_PAGE(PageSwapCache(page), page);
        }

        /* Move dirty while page refs frozen and newpage not yet exposed */
        dirty = PageDirty(page);
        if (dirty) {
            ClearPageDirty(page);
            SetPageDirty(newpage);
        }

        xas_store(&xas, newpage);
        if (PageTransHuge(page)) {
            /* store newpage at every subpage index */
            ...
        }

        /*
         * Drop cache reference from old page by unfreezing
         * to one less reference.
         * ...
         */
        page_ref_unfreeze(page, expected_count - nr);
        ...
        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         */
        if (newzone != oldzone) {
            struct mem_cgroup *memcg;
            ...
            memcg = page_memcg(page);
            ...
            if (PageSwapBacked(page) && !PageSwapCache(page)) {
                /* move NR_SHMEM between the two lruvecs */
                ...
            }
            if (PageSwapCache(page)) {
                /* move NR_SWAPCACHE between the two lruvecs */
                ...
            }
        }
        ...
        return MIGRATEPAGE_SUCCESS;
    }
    int migrate_huge_page_move_mapping(struct address_space *mapping,
                       struct page *newpage, struct page *page)
    {
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        int expected_count;

        xas_lock_irq(&xas);
        expected_count = 2 + page_has_private(page);
        if (page_count(page) != expected_count || xas_load(&xas) != page) {
            xas_unlock_irq(&xas);
            return -EAGAIN;
        }

        if (!page_ref_freeze(page, expected_count)) {
            xas_unlock_irq(&xas);
            return -EAGAIN;
        }

        newpage->index = page->index;
        newpage->mapping = page->mapping;
        ...
        xas_store(&xas, newpage);

        page_ref_unfreeze(page, expected_count - 1);

        xas_unlock_irq(&xas);

        return MIGRATEPAGE_SUCCESS;
    }
    /*
     * Copy the page to its new location
     */
    void migrate_page_states(struct page *newpage, struct page *page)
    {
        int cpupid;

        if (PageError(page))
            SetPageError(newpage);
        if (PageReferenced(page))
            SetPageReferenced(newpage);
        if (PageUptodate(page))
            SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
            VM_BUG_ON_PAGE(PageUnevictable(page), page);
            SetPageActive(newpage);
        } else if (TestClearPageUnevictable(page))
            SetPageUnevictable(newpage);
        if (PageWorkingset(page))
            SetPageWorkingset(newpage);
        if (PageChecked(page))
            SetPageChecked(newpage);
        if (PageMappedToDisk(page))
            SetPageMappedToDisk(newpage);

        if (PageDirty(page))
            SetPageDirty(newpage);

        if (page_is_young(page))
            set_page_young(newpage);
        if (page_is_idle(page))
            set_page_idle(newpage);

        /*
         * Copy NUMA information to the new page, to prevent over-eager
         * future migrations of this same page.
         */
        cpupid = page_cpupid_xchg_last(page, -1);
        page_cpupid_xchg_last(newpage, cpupid);

        ksm_migrate_page(newpage, page);
        ...
        if (PageSwapCache(page))
            ClearPageSwapCache(page);
        ClearPagePrivate(page);

        /* page->private contains hugetlb specific flags */
        if (!PageHuge(page))
            set_page_private(page, 0);

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        ...
        if (PageReadahead(page))
            SetPageReadahead(newpage);

        copy_page_owner(page, newpage);

        if (!PageHuge(page))
            mem_cgroup_migrate(page, newpage);
    }
    void migrate_page_copy(struct page *newpage, struct page *page)
    {
        if (PageHuge(page) || PageTransHuge(page))
            copy_huge_page(newpage, page);
        else
            copy_highpage(newpage, page);

        migrate_page_states(newpage, page);
    }
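The split between migrate_page_copy() and migrate_page_states() exists so a caller can move the data itself (MIGRATE_SYNC_NO_COPY) and only ask the core to transfer page state; migrate_vma_pages() below relies on this for device memory. A hedged sketch of such a migratepage method (my_dev_dma_copy is a hypothetical driver helper):

    static int my_dev_migratepage(struct address_space *mapping,
                      struct page *newpage, struct page *page,
                      enum migrate_mode mode)
    {
        int rc;

        rc = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
            return rc;

        my_dev_dma_copy(newpage, page);     /* hypothetical: copy via DMA */
        migrate_page_states(newpage, page); /* state only, no CPU copy */
        return MIGRATEPAGE_SUCCESS;
    }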
    /*
     * Common logic to directly migrate a single LRU page suitable for
     * pages that do not use PagePrivate/PagePrivate2.
     *
     * Pages are locked upon entry and exit.
     */
    int migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page,
            enum migrate_mode mode)
    {
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
            return rc;

        if (mode != MIGRATE_SYNC_NO_COPY)
            migrate_page_copy(newpage, page);
        else
            migrate_page_states(newpage, page);
        return MIGRATEPAGE_SUCCESS;
    }
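migrate_page() is meant to be plugged straight into address_space_operations by mappings whose pages carry no private data; the swap address space does exactly this. A minimal illustration (the surrounding my_fs_* hooks are hypothetical):

    static const struct address_space_operations my_fs_aops = {
        .readpage    = my_fs_readpage,   /* hypothetical */
        .writepage   = my_fs_writepage,  /* hypothetical */
        .migratepage = migrate_page,     /* generic helper is enough here */
    };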
    static int __buffer_migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page, enum migrate_mode mode,
            bool check_refs)
    {
        struct buffer_head *bh, *head;
        int rc;
        int expected_count;

        if (!page_has_buffers(page))
            return migrate_page(mapping, newpage, page, mode);

        /* Check whether page does not have extra refs before we do more work */
        expected_count = expected_page_refs(mapping, page);
        if (page_count(page) != expected_count)
            return -EAGAIN;

        head = page_buffers(page);
        ...
        rc = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
            goto unlock_buffers;

        attach_page_private(newpage, detach_page_private(page));
        ...
        if (mode != MIGRATE_SYNC_NO_COPY)
            migrate_page_copy(newpage, page);
        else
            migrate_page_states(newpage, page);
        ...
    }
    /*
     * Migration function for pages with buffers. This function can only be used
     * if the underlying filesystem guarantees that no other references to "page"
     * exist. For example attached buffer heads are accessed only under page lock.
     */
    int buffer_migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page, enum migrate_mode mode)
    {
        return __buffer_migrate_page(mapping, newpage, page, mode, false);
    }

    /*
     * Same as above except that this variant also checks that there are no
     * extra buffer head references, for mappings where buffer heads are looked
     * up and referenced directly (such as block device mappings).
     */
    int buffer_migrate_page_norefs(struct address_space *mapping,
            struct page *newpage, struct page *page, enum migrate_mode mode)
    {
        return __buffer_migrate_page(mapping, newpage, page, mode, true);
    }
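As a point of reference for how the two variants are used: filesystems whose buffer heads are only touched under the page lock (ext4, for instance) can use buffer_migrate_page in their aops, while the block device mapping uses the _norefs variant because buffer heads there are looked up and referenced directly. Illustrative wiring only; a real aops table has many more hooks:

    static const struct address_space_operations my_blkdev_aops = {
        .migratepage = buffer_migrate_page_norefs,
    };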
    /*
     * Writeback a page to clean the dirty state
     */
    static int writeout(struct address_space *mapping, struct page *page)
    {
        struct writeback_control wbc = {
            .sync_mode = WB_SYNC_NONE,
            .nr_to_write = 1,
            ...
        };
        int rc;

        if (!mapping->a_ops->writepage)
            /* No write method for the address space */
            return -EINVAL;

        if (!clear_page_dirty_for_io(page))
            /* Someone else already triggered a write */
            return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page, false);

        rc = mapping->a_ops->writepage(page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
            /* unlocked. Relock */
            lock_page(page);

        return (rc < 0) ? -EIO : -EAGAIN;
    }
    static int fallback_migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page, enum migrate_mode mode)
    {
        if (PageDirty(page)) {
            /* Only writeback pages in full synchronous migration */
            switch (mode) {
            case MIGRATE_SYNC:
            case MIGRATE_SYNC_NO_COPY:
                break;
            default:
                return -EBUSY;
            }
            return writeout(mapping, page);
        }

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
            return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

        return migrate_page(mapping, newpage, page, mode);
    }
    /*
     * Move a page to a newly allocated page
     * The page is locked and all ptes have been successfully removed.
     *
     * The new page will have replaced the old page if this function
     * is successful.
     * ...
     */
    static int move_to_new_page(struct page *newpage, struct page *page,
                    enum migrate_mode mode)
    {
        struct address_space *mapping;
        int rc = -EAGAIN;
        bool is_lru = !__PageMovable(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

        mapping = page_mapping(page);

        if (likely(is_lru)) {
            if (!mapping)
                rc = migrate_page(mapping, newpage, page, mode);
            else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems provide a
                 * migratepage callback ... the most common path
                 * for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping, newpage,
                                page, mode);
            else
                rc = fallback_migrate_page(mapping, newpage,
                                page, mode);
        } else {
            /*
             * In case of non-lru page, it could be released after
             * isolation. Check again under the page lock.
             */
            VM_BUG_ON_PAGE(!PageIsolated(page), page);
            if (!PageMovable(page)) {
                rc = MIGRATEPAGE_SUCCESS;
                __ClearPageIsolated(page);
                goto out;
            }

            rc = mapping->a_ops->migratepage(mapping, newpage,
                            page, mode);
            WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
                    !PageIsolated(page));
        }

        /*
         * When successful, old pagecache page->mapping must be cleared before
         * page is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
            if (__PageMovable(page)) {
                VM_BUG_ON_PAGE(!PageIsolated(page), page);
                /*
                 * We clear PG_movable under page_lock so any compactor
                 * cannot try to migrate this page.
                 */
                __ClearPageIsolated(page);
            }

            /*
             * Anonymous and movable page->mapping will be cleared by
             * free_pages_prepare so don't reset it here for keeping
             * the type to work PageAnon, for example.
             */
            if (!PageMappingFlags(page))
                page->mapping = NULL;
            ...
        }
    out:
        return rc;
    }
    static int __unmap_and_move(struct page *page, struct page *newpage,
                    int force, enum migrate_mode mode)
    {
        int rc = -EAGAIN;
        bool page_was_mapped = false;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__PageMovable(page);

        if (!trylock_page(page)) {
            if (!force || mode == MIGRATE_ASYNC)
                goto out;
            /*
             * It's not safe for direct compaction to call lock_page.
             * For example, during page readahead pages are added locked
             * to the LRU. Later, when the IO completes the pages are
             * marked uptodate and unlocked. But the unlock is not the
             * concern of migration: if a direct compactor locks the
             * second or third page, the process can end up locking
             * the same page twice and deadlocking. Rather than
             * trying to be clever about what pages can be locked,
             * avoid the use of lock_page for direct compaction
             * altogether.
             */
            if (current->flags & PF_MEMALLOC)
                goto out;

            lock_page(page);
        }

        if (PageWriteback(page)) {
            /* only wait for writeback in full synchronous migration */
            ...
            wait_on_page_writeback(page);
        }

        /*
         * By try_to_migrate(), page->mapcount goes down to 0 here. In this
         * case, we cannot notice that anon_vma is freed while we migrate a
         * page. This get_anon_vma() delays freeing the anon_vma pointer
         * until the end of migration. File cache pages are fine because of
         * page_lock(), so just care about Anon pages here.
         * ...
         * But if we cannot get anon_vma, then we won't need it anyway,
         * because that implies that the anon page is no longer mapped
         * (and cannot be remapped so long as we hold the page lock).
         */
        if (PageAnon(page) && !PageKsm(page))
            anon_vma = page_get_anon_vma(page);

        /*
         * Block others from accessing the new page when we get around to
         * establishing additional references ...
         */
        if (unlikely(!trylock_page(newpage)))
            goto out_unlock;

        if (unlikely(!is_lru)) {
            rc = move_to_new_page(newpage, page, mode);
            goto out_unlock_both;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_cleanup_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining. Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page can not be migrated. So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
            VM_BUG_ON_PAGE(PageAnon(page), page);
            if (page_has_private(page)) {
                try_to_free_buffers(page);
                goto out_unlock_both;
            }
        } else if (page_mapped(page)) {
            /* Establish migration ptes */
            VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
                    page);
            try_to_migrate(page, 0);
            page_was_mapped = true;
        }

        if (!page_mapped(page))
            rc = move_to_new_page(newpage, page, mode);

        if (page_was_mapped)
            remove_migration_ptes(page,
                rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

    out_unlock_both:
        unlock_page(newpage);
    out_unlock:
        if (anon_vma)
            put_anon_vma(anon_vma);
        unlock_page(page);
    out:
        /*
         * If migration is successful, decrease refcount of the newpage,
         * which will not free the page because new page owner increased
         * refcounter. As well, if it is LRU page, add the page to LRU
         * list in here. Use the old state of the isolated source page to
         * determine if we migrated a LRU page. newpage was already unlocked
         * and possibly modified by its owner - don't rely on the page
         * state.
         */
        ...
        return rc;
    }
    /*
     * Obtain the lock on page, remove all ptes and migrate the page
     * to the newly allocated page in newpage.
     */
    static int unmap_and_move(new_page_t get_new_page,
                   free_page_t put_new_page,
                   unsigned long private, struct page *page,
                   int force, enum migrate_mode mode,
                   enum migrate_reason reason,
                   struct list_head *ret)
    {
        int rc = MIGRATEPAGE_SUCCESS;
        struct page *newpage = NULL;

        if (!thp_migration_supported() && PageTransHuge(page))
            return -ENOSYS;

        if (page_count(page) == 1) {
            /* page was freed from under us. So we are done. */
            ClearPageActive(page);
            ClearPageUnevictable(page);
            if (unlikely(__PageMovable(page))) {
                lock_page(page);
                if (!PageMovable(page))
                    __ClearPageIsolated(page);
                unlock_page(page);
            }
            goto out;
        }

        newpage = get_new_page(page, private);
        if (!newpage)
            return -ENOMEM;

        rc = __unmap_and_move(page, newpage, force, mode);
        ...
    out:
        if (rc != -EAGAIN) {
            /*
             * A page that has been migrated has all references
             * removed and will be freed. A page that has not been
             * migrated will have kept its references and be restored.
             */
            list_del(&page->lru);
        }

        /*
         * If migration is successful, releases reference grabbed during
         * isolation. Otherwise, restore the page to right list unless
         * we want to retry.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
            /*
             * Compaction can migrate also non-LRU pages which are
             * not accounted to NR_ISOLATED_*. They can be recognized
             * as __PageMovable
             */
            if (likely(!__PageMovable(page)))
                mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                        page_is_file_lru(page), -thp_nr_pages(page));

            if (reason != MR_MEMORY_FAILURE)
                /*
                 * We release the page in page_handle_poison.
                 */
                put_page(page);
        } else {
            if (rc != -EAGAIN)
                list_add_tail(&page->lru, ret);
            ...
        }

        return rc;
    }
    /*
     * Counterpart of unmap_and_move() for hugepage migration.
     * ...
     * writeback status of all subpages is counted in the reference
     * count of the head page (i.e. if all subpages of a 2MB hugepage are
     * under direct I/O, the reference of the head page is 512 and a bit more.)
     * ...
     * There is also no race when direct I/O is issued on the page under
     * migration, because then the pte is replaced with a migration swap entry
     * and direct I/O code will wait in the page fault for migration to complete.
     */
    static int unmap_and_move_huge_page(new_page_t get_new_page,
                    free_page_t put_new_page, unsigned long private,
                    struct page *hpage, int force,
                    enum migrate_mode mode, int reason,
                    struct list_head *ret)
    {
        ...
        struct page *new_hpage;
        ...
        /*
         * Migratability of hugepages depends on architectures and their size.
         * This check is necessary because some callers of hugepage migration
         * like soft offline and memory hotremove don't walk through page
         * tables or check whether the hugepage is pmd-based or not before
         * kicking migration.
         */
        ...
        /* page was freed from under us. So we are done. */
        ...
        /*
         * ... Without
         * page_mapping() set, hugetlbfs specific move page routine will not
         * be called ...
         */
        ...
    }
    static inline int try_split_thp(struct page *page, struct page **page2,
                    struct list_head *from)
    {
        int rc = 0;

        lock_page(page);
        rc = split_huge_page_to_list(page, from);
        unlock_page(page);
        if (!rc)
            list_safe_reset_next(page, *page2, lru);

        return rc;
    }
    /*
     * migrate_pages - migrate the pages specified in a list, to the free
     *                 pages supplied as the target for the page migration
     *
     * @from:         The list of pages to be migrated.
     * @get_new_page: The function used to allocate free pages to be used
     *                as the target of the page migration.
     * @put_new_page: The function used to free target pages if migration
     *                fails, or NULL if no special handling is necessary.
     * @private:      Private data to be passed on to get_new_page().
     * @mode:         The migration mode that specifies the constraints for
     *                page migration, if any.
     * @reason:       The reason for page migration.
     * ...
     */
    int migrate_pages(struct list_head *from, new_page_t get_new_page,
            free_page_t put_new_page, unsigned long private,
            enum migrate_mode mode, int reason)
    {
        struct page *page;
        struct page *page2;
        ...
        list_for_each_entry_safe(page, page2, from, lru) {
            /*
             * THP statistics is based on the source huge page.
             * Capture required information that might get lost
             * during migration.
             */
            is_thp = PageTransHuge(page) && !PageHuge(page);
            nr_subpages = thp_nr_pages(page);
            ...
            if (PageHuge(page))
                rc = unmap_and_move_huge_page(get_new_page,
                        put_new_page, private, page,
                        pass > 2, mode, reason, &ret_pages);
            else
                rc = unmap_and_move(get_new_page, put_new_page,
                        private, page, pass > 2, mode,
                        reason, &ret_pages);
            /*
             * The rules are:
             *    Success: non hugetlb page will be freed, hugetlb
             *             page will be put back
             *    -EAGAIN: stay on the from list
             *    -ENOMEM: stay on the from list
             *    Other errno: put on ret_pages list ...
             */
            switch (rc) {
            case -ENOSYS:
                /*
                 * THP migration is unsupported: retry on the same page
                 * with the THP split to base pages. The head page is
                 * retried immediately, tail pages are appended to the list.
                 */
                if (is_thp && !try_split_thp(page, &page2, from))
                    goto retry;
                ...
            case -ENOMEM:
                /* when memory is low, split a THP and retry its base pages */
                if (is_thp && !try_split_thp(page, &page2, from))
                    goto retry;
                goto out;
            ...
            default:
                /*
                 * Permanent failure (-EBUSY, etc.):
                 * unlike -EAGAIN case, the failed page is
                 * removed from migration page list and not
                 * retried in the next outer loop.
                 */
                ...
            }
        }
        ...
        /*
         * Put the permanent failure page back to migration list, they
         * will be put back to the right list by the caller.
         */
        list_splice(&ret_pages, from);
        ...
    }
    struct page *alloc_migration_target(struct page *page, unsigned long private)
    {
        struct migration_target_control *mtc;
        ...
        struct page *new_page = NULL;
        ...
        nid = mtc->nid;
        if (nid == NUMA_NO_NODE)
            nid = page_to_nid(page);

        if (PageHuge(page)) {
            struct hstate *h = page_hstate(compound_head(page));
            ...
        }

        if (PageTransHuge(page)) {
            /* use THP-style gfp flags and HPAGE_PMD_ORDER */
            ...
        }
        zidx = zone_idx(page_zone(page));
        if (zidx > ZONE_NORMAL)
            gfp_mask |= __GFP_HIGHMEM;
        ...
        return new_page;
    }
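alloc_migration_target() is the stock get_new_page callback. A sketch of how callers such as memory hotremove drive the whole pipeline (isolate, migrate, put back on failure); the exact migrate_pages() signature varies between kernel versions and target_nid is assumed to be chosen by the caller, so treat this as illustrative:

    LIST_HEAD(pagelist);
    struct migration_target_control mtc = {
        .nid      = target_nid,
        .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
    };
    int ret;

    /* ... isolate pages onto &pagelist, e.g. via isolate_lru_page() ... */

    ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
                (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
    if (ret)
        putback_movable_pages(&pagelist);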
    /*
     * Resolves the given address to a struct page, isolates it from the LRU and
     * puts it to the given pagelist.
     * Returns:
     *     errno - if the page cannot be found/isolated
     *     0     - when it doesn't have to be migrated because it is already on
     *             the target node
     *     1     - when it has been queued
     */

In add_page_for_migration():

        struct page *page;
        ...
        /* FOLL_DUMP to ignore special (like zero) pages */
        page = follow_page(vma, addr, follflags);

        err = PTR_ERR(page);
        if (IS_ERR(page))
            goto out;

        err = -ENOENT;
        if (!page)
            goto out;

        err = 0;
        if (page_to_nid(page) == node)
            goto out_putpage;

        err = -EACCES;
        if (page_mapcount(page) > 1 && !migrate_all)
            goto out_putpage;

        if (PageHuge(page)) {
            if (PageHead(page)) {
                isolate_huge_page(page, pagelist);
                err = 1;
            }
        } else {
            struct page *head;

            head = compound_head(page);
            err = isolate_lru_page(head);
            ...
        }
    out_putpage:
        /*
         * Either remove the duplicate refcount from
         * isolate_lru_page() or drop the page ref if it was
         * not isolated.
         */
        put_page(page);
    out:
        ...
    /*
     * Migrate an array of page address onto an array of nodes and fill
     * the corresponding array of status.
     */

In do_pages_move():

        /*
         * Errors in the page lookup or isolation are not fatal and we simply
         * report them via the status array ...
         */
        ...
        /* The page is successfully queued for migration */
        ...
        /*
         * If the page is already on the target node (!err), store the
         * node, otherwise, store the err.
         */
In do_pages_stat_array():

        struct page *page;
        ...
        page = follow_page(vma, addr, FOLL_DUMP);

        err = PTR_ERR(page);
        if (IS_ERR(page))
            goto set_status;

        err = page ? page_to_nid(page) : -ENOENT;
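These two paths back the move_pages(2) system call. From userspace the interface looks like this (sketch using the libnuma wrapper; build with -lnuma):

    #include <numaif.h>         /* move_pages(), MPOL_MF_MOVE */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        void *buf = NULL;

        if (posix_memalign(&buf, psz, psz))
            return 1;
        *(volatile char *)buf = 1;      /* fault the page in first */

        void *pages[1]  = { buf };
        int   nodes[1]  = { 0 };        /* request node 0 */
        int   status[1] = { -1 };

        /* pid 0 == calling process; status[] gets the node or -errno */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
            printf("page is now on node %d\n", status[0]);
        return 0;
    }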
    static struct page *alloc_misplaced_dst_page(struct page *page,
                           unsigned long data)
    {
        int nid = (int) data;
        struct page *newpage;

        newpage = __alloc_pages_node(nid, ..., 0);

        return newpage;
    }

    static struct page *alloc_misplaced_dst_page_thp(struct page *page,
                             unsigned long data)
    {
        ...
        struct page *newpage;

        newpage = alloc_pages_node(nid, ..., HPAGE_PMD_ORDER);
        ...
        return newpage;
    }
    static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
    {
        int page_lru;
        int nr_pages = thp_nr_pages(page);

        VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

        /* Do not migrate THP mapped by multiple processes */
        if (PageTransHuge(page) && total_mapcount(page) > 1)
            return 0;
        ...
        if (isolate_lru_page(page))
            return 0;

        page_lru = page_is_file_lru(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
                    nr_pages);

        /*
         * Isolating the page has taken another reference, so the
         * caller's reference can be safely dropped without the page
         * disappearing underneath us during migration.
         */
        put_page(page);
        return 1;
    }
    /*
     * Attempt to migrate a misplaced page to the specified destination
     * node. Caller is expected to have an elevated reference count on
     * the page that will be dropped by this function before returning.
     */
    int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                   int node)
    {
        ...
        int nr_pages = thp_nr_pages(page);

        /*
         * PTE mapped THP or HugeTLB page can't reach here so the page could
         * be either base page or THP. And it must be head page if it is
         * THP.
         */
        compound = PageTransHuge(page);
        ...
        if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
            (vma->vm_flags & VM_EXEC))
            goto out;
        ...
        if (page_is_file_lru(page) && PageDirty(page))
            goto out;

        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated)
            goto out;

        list_add(&page->lru, &migratepages);
        ...
        if (nr_remaining) {
            if (!list_empty(&migratepages)) {
                list_del(&page->lru);
                mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                        page_is_file_lru(page), -nr_pages);
                putback_lru_page(page);
            }
            ...
        }
        ...
    out:
        put_page(page);
        return 0;
    }
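This is the entry point for automatic NUMA balancing: the NUMA hint fault handler picks a target node and hands the faulting page over. Abridged from mm/memory.c's do_numa_page() (not part of this file):

        migrated = migrate_misplaced_page(page, vma, target_nid);
        if (migrated) {
            page_nid = target_nid;
            flags |= TNF_MIGRATED;
        } else
            flags |= TNF_MIGRATE_FAIL;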
In migrate_vma_collect_pmd(), first the pmd level:

        struct page *page;
        ...
        page = pmd_page(*pmdp);
        if (is_huge_zero_page(page)) {
            ...
        } else {
            get_page(page);
            ...
            if (unlikely(!trylock_page(page)))
                ...
            ret = split_huge_page(page);
            unlock_page(page);
            put_page(page);
            ...
        }

then the pte loop:

        for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
            struct page *page;
            ...
            if (!pte_present(pte)) {
                /*
                 * Only care about unaddressable device page special
                 * page table entry. Other special swap entries are not
                 * migratable, and we ignore regular swapped page.
                 */
                entry = pte_to_swp_entry(pte);
                ...
                page = pfn_swap_entry_to_page(entry);
                if (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
                    page->pgmap->owner != migrate->pgmap_owner)
                    goto next;

                mpfn = migrate_pfn(page_to_pfn(page)) |
                        MIGRATE_PFN_MIGRATE;
                ...
            } else {
                ...
                page = vm_normal_page(migrate->vma, addr, pte);
                ...
            }

            /* FIXME support THP */
            if (!page || !page->mapping || PageTransCompound(page)) {
                mpfn = 0;
                goto next;
            }

            /*
             * By getting a reference on the page we pin it and that blocks
             * any kind of migration ...
             *
             * We drop this reference after isolating the page from the lru
             * for non device page (device page are not on the lru and thus
             * can't be dropped from it).
             */
            get_page(page);

            /*
             * Optimize for the common case where page is only mapped once
             * in one process. If we can lock the page, then we can safely
             * set up a special migration page table entry now.
             */
            if (trylock_page(page)) {
                ...
                /* Setup special migration page table entry */
                if (mpfn & MIGRATE_PFN_WRITE)
                    entry = make_writable_migration_entry(
                                page_to_pfn(page));
                else
                    entry = make_readable_migration_entry(
                                page_to_pfn(page));
                ...
                /*
                 * ... drop page refcount. Page won't be freed, as we took
                 * a reference just above.
                 */
                page_remove_rmap(page, false);
                put_page(page);
                ...
            }
        }
    /*
     * migrate_vma_collect() - collect pages over a range of virtual addresses
     * ...
     * This will walk the CPU page table. For each virtual address backed by a
     * valid page, it updates the src array and takes a reference on the page,
     * in order to pin the page until we lock it and unmap it.
     */

In migrate_vma_collect():

        /*
         * ... so that the registered device driver can skip invalidating device
         * private page mappings that won't be migrated.
         */
    /*
     * migrate_vma_check_page() - check if page is pinned or not
     * @page: struct page to check
     *
     * Pinned pages cannot be migrated. This is the same test as in
     * migrate_page_move_mapping(), except that here we allow migration of a
     * ZONE_DEVICE page.
     */
    static bool migrate_vma_check_page(struct page *page)
    {
        /*
         * One extra ref because caller holds an extra reference, either from
         * isolate_lru_page() for a regular page, or migrate_vma_collect() for
         * a device page.
         */
        int extra = 1;

        /*
         * FIXME support THP (transparent huge page), it is bit more complex to
         * check them than regular pages, because they can be mapped with a pmd
         * or with a pte (split pte mapping).
         */
        if (PageCompound(page))
            return false;

        /* Page from ZONE_DEVICE have one extra reference */
        if (is_zone_device_page(page)) {
            /*
             * Private page can never be pinned as they have no valid pte and
             * GUP will fail for those. Yet if there is a pending migration
             * a thread might try to wait on the pte migration entry and
             * will bump the page reference count. Sadly there is no way to
             * differentiate a regular pin from migration wait. ... Anyway,
             * it does not need to take a reference on page.
             */
            return is_device_private_page(page);
        }

        /* For file back page */
        if (page_mapping(page))
            extra += 1 + page_has_private(page);

        if ((page_count(page) - extra) > page_mapcount(page))
            return false;

        return true;
    }
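To make the final check concrete (illustrative numbers, not from the source): an anonymous page mapped in exactly one process has page_mapcount() == 1 and, after the caller's isolate_lru_page() reference, page_count() == 2; page_mapping() is NULL for anonymous pages, so extra stays 1, and 2 - 1 > 1 is false, so the page is considered migratable. If a concurrent get_user_pages() holds a pin, page_count() becomes 3 and 3 - 1 > 1 is true, so the page is reported as pinned.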
    /*
     * migrate_vma_prepare() - lock pages and isolate them from the lru
     * ...
     * This locks pages that have been collected by migrate_vma_collect(). Once
     * each page is locked it is isolated from the lru (for non-device pages).
     * Finally, the ref taken by migrate_vma_collect() is dropped ...
     */

In migrate_vma_prepare():

        for (i = 0; (i < npages) && migrate->cpages; i++) {
            struct page *page = migrate_pfn_to_page(migrate->src[i]);
            ...
            if (!page)
                continue;
            ...
            /*
             * Because we are migrating several pages there can be
             * a deadlock between 2 concurrent migrations where each
             * are waiting on each other page lock.
             *
             * Make migrate_vma() a best effort thing and backoff
             * for any page we can not lock right away.
             */
            if (!trylock_page(page)) {
                migrate->src[i] = 0;
                migrate->cpages--;
                put_page(page);
                continue;
            }
            ...
            /* ZONE_DEVICE pages are not on LRU */
            if (!is_zone_device_page(page)) {
                if (!PageLRU(page) && allow_drain) {
                    /* Drain CPU's pagevec */
                    lru_add_drain_all();
                    allow_drain = false;
                }

                if (isolate_lru_page(page)) {
                    ...
                    unlock_page(page);
                    migrate->cpages--;
                    put_page(page);
                    continue;
                }

                /* Drop the reference we took in collect */
                put_page(page);
            }

            if (!migrate_vma_check_page(page)) {
                ...
                if (remap) {
                    ...
                    if (!is_zone_device_page(page)) {
                        get_page(page);
                        putback_lru_page(page);
                    }
                } else {
                    ...
                    unlock_page(page);
                    ...
                    if (!is_zone_device_page(page))
                        putback_lru_page(page);
                    put_page(page);
                }
            }
        }

        for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
            struct page *page = migrate_pfn_to_page(migrate->src[i]);

            if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
                continue;

            remove_migration_pte(page, migrate->vma, addr, page);

            migrate->src[i] = 0;
            unlock_page(page);
            put_page(page);
            restore--;
        }
    /*
     * migrate_vma_unmap() - replace page mapping with special migration pte entry
     * @migrate: migrate struct containing all migration information
     *
     * Replace page mapping (CPU page table pte) with a special migration pte
     * entry and check again if it has been pinned. Pinned pages are restored
     * because we cannot migrate them.
     *
     * This is the last step before we call the device driver callback to
     * allocate destination memory and copy contents of original page over to
     * new page.
     */
    static void migrate_vma_unmap(struct migrate_vma *migrate)
    {
        ...
        for (i = 0; i < npages; i++) {
            struct page *page = migrate_pfn_to_page(migrate->src[i]);

            if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
                continue;

            if (page_mapped(page)) {
                try_to_migrate(page, 0);
                if (page_mapped(page))
                    goto restore;
            }

            if (migrate_vma_check_page(page))
                continue;
    restore:
            ...
        }

        for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
            struct page *page = migrate_pfn_to_page(migrate->src[i]);

            if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
                continue;

            remove_migration_ptes(page, page, false);
            ...
            unlock_page(page);
            ...
            if (is_zone_device_page(page))
                put_page(page);
            else
                putback_lru_page(page);
        }
    }
From the migrate_vma_setup() kernel-doc:

    /*
     * ... Then lock those pages and unmap them. Once the pages are locked
     * and unmapped, check whether each page is pinned or not. Pages that
     * aren't pinned have the MIGRATE_PFN_MIGRATE flag set ...
     *
     * The caller should then allocate destination memory and copy source
     * memory to it; each corresponding dst array entry must be updated with
     * the pfn of the destination page and with the MIGRATE_PFN_VALID and
     * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct
     * pages locked, via lock_page()).
     *
     * Note that the caller does not have to migrate all the pages that are
     * marked with the MIGRATE_PFN_MIGRATE flag in the src array unless this
     * is a migration from device memory to system memory. If the caller
     * cannot migrate a device page back to system memory, then it must
     * return VM_FAULT_SIGBUS ...
     *
     * For empty entries inside CPU page table (pte_none() or pmd_none() is
     * true) we do set the MIGRATE_PFN_MIGRATE flag in the src array, thus
     * allowing the caller to allocate device memory for those unbacked
     * virtual addresses ...
     *
     * After that, the caller must call migrate_vma_pages() to migrate struct
     * page information from the source struct page to the destination
     * struct page. If it fails to migrate the struct page information, then
     * it clears the MIGRATE_PFN_MIGRATE flag in the src array ...
     *
     * It is safe to update device page table after migrate_vma_pages()
     * because both destination and source page are still locked, and the
     * mmap_lock is held in read mode ...
     *
     * Once the caller is done cleaning up things and updating its page table
     * (if it chose to do so, this is not an obligation) it finally calls
     * migrate_vma_finalize() to update the CPU page table to point to new
     * pages for successfully migrated pages, or otherwise restore the CPU
     * page table to point at the original source pages.
     */
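A hedged sketch of the driver-side flow the kernel-doc describes; the my_ names and the device-copy step are hypothetical, the migrate_vma_* calls and struct fields are the real API. It assumes the range covers at most 64 pages:

    static int my_migrate_range(struct vm_area_struct *vma,
                    unsigned long start, unsigned long end)
    {
        unsigned long src_pfns[64] = { 0 }, dst_pfns[64] = { 0 };
        struct migrate_vma args = {
            .vma         = vma,
            .start       = start,
            .end         = end,
            .src         = src_pfns,
            .dst         = dst_pfns,
            .pgmap_owner = my_pgmap_owner,      /* hypothetical */
            .flags       = MIGRATE_VMA_SELECT_SYSTEM,
        };
        int ret;

        ret = migrate_vma_setup(&args);
        if (ret)
            return ret;

        /*
         * For each args.src[i] with MIGRATE_PFN_MIGRATE set: allocate a
         * device page, lock it, copy the data, and set args.dst[i] =
         * migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED.
         */
        my_alloc_and_copy(&args);               /* hypothetical */

        migrate_vma_pages(&args);
        /* update device page tables here: both pages are still locked */
        migrate_vma_finalize(&args);
        return 0;
    }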
    /*
     * This code closely matches the code in:
     *   __handle_mm_fault()
     *     handle_pte_fault()
     *       do_anonymous_page()
     * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
     * private page.
     */
    static void migrate_vma_insert_page(struct migrate_vma *migrate,
                        unsigned long addr,
                        struct page *page,
                        unsigned long *src)
    {
        ...
        if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
            goto abort;

        /*
         * The memory barrier inside __SetPageUptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);

        if (is_zone_device_page(page)) {
            if (is_device_private_page(page)) {
                swp_entry_t swp_entry;

                if (vma->vm_flags & VM_WRITE)
                    swp_entry = make_writable_device_private_entry(
                                page_to_pfn(page));
                else
                    swp_entry = make_readable_device_private_entry(
                                page_to_pfn(page));
                entry = swp_entry_to_pte(swp_entry);
            } else {
                /*
                 * For now we only support migrating to un-addressable
                 * device memory.
                 */
                pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
                goto abort;
            }
        } else {
            entry = mk_pte(page, vma->vm_page_prot);
            ...
        }
        ...
        page_add_new_anon_rmap(page, vma, addr, false);
        if (!is_zone_device_page(page))
            lru_cache_add_inactive_or_unevictable(page, vma);
        get_page(page);
        ...
    }
    /*
     * migrate_vma_pages() - migrate meta-data from src page to dst page
     * @migrate: migrate struct containing all migration information
     *
     * This migrates struct page meta-data from source struct page to
     * destination struct page. This effectively finishes the migration from
     * source page to the destination page.
     */
    void migrate_vma_pages(struct migrate_vma *migrate)
    {
        ...
        for (i = 0; i < npages; addr += PAGE_SIZE, i++) {
            struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
            struct page *page = migrate_pfn_to_page(migrate->src[i]);
            ...
            if (!page) {
                /* pte_none() entry: insert the new page directly */
                ...
                continue;
            }

            mapping = page_mapping(page);

            if (is_zone_device_page(newpage)) {
                ...
                /*
                 * Other types of ZONE_DEVICE page are not
                 * supported.
                 */
                ...
            }

            r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
            if (r != MIGRATEPAGE_SUCCESS)
                migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
        }
        ...
    }
    /*
     * migrate_vma_finalize() - restore CPU page table entry
     * @migrate: migrate struct containing all migration information
     *
     * This replaces the special migration pte entry with either a mapping to
     * the new page if migration was successful for that page, or to the
     * original page otherwise.
     * ...
     */
    void migrate_vma_finalize(struct migrate_vma *migrate)
    {
        ...
        for (i = 0; i < npages; i++) {
            struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
            struct page *page = migrate_pfn_to_page(migrate->src[i]);

            if (!page) {
                ...
                continue;
            }

            if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
                ...
                newpage = page;
            }

            remove_migration_ptes(page, newpage, false);
            unlock_page(page);

            if (is_zone_device_page(page))
                put_page(page);
            else
                putback_lru_page(page);

            if (newpage != page) {
                ...
            }
        }
    }
A final match, a comment fragment from the hotplug/demotion code at the end of the file:

         * ... page allocator zonelists.