Lines matching refs:page in mm/migrate.c (cross-reference listing: each entry gives the file line number, the matching source line, and the enclosing function; "argument" and "local" mark the lines where page is declared). The API mix below (radix_tree_* calls against mapping->i_pages and its xa_lock) places this around a v4.19-era kernel.
84 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
97 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
105 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
118 if (unlikely(!trylock_page(page))) in isolate_movable_page()
121 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
124 mapping = page_mapping(page); in isolate_movable_page()
125 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
127 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
131 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
132 __SetPageIsolated(page); in isolate_movable_page()
133 unlock_page(page); in isolate_movable_page()
138 unlock_page(page); in isolate_movable_page()
140 put_page(page); in isolate_movable_page()
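
These matches trace the isolation handshake for non-LRU movable pages: pin the page, confirm it is (still) movable, take the page lock, and let the owning driver isolate it before marking it PageIsolated. A condensed sketch of that flow, assuming v4.19-era APIs; the error-path labels and the -EBUSY returns are reconstructed, not copied from the file:

    int isolate_movable_page(struct page *page, isolate_mode_t mode)
    {
        struct address_space *mapping;

        /* Pin the page so it cannot be freed while we inspect it. */
        if (unlikely(!get_page_unless_zero(page)))
            goto out;

        /* Only driver-owned movable pages qualify. */
        if (unlikely(!__PageMovable(page)))
            goto out_putpage;

        /* The page lock serializes against the driver and other isolators. */
        if (unlikely(!trylock_page(page)))
            goto out_putpage;

        /* Recheck under the lock; the driver may have torn it down. */
        if (!PageMovable(page) || PageIsolated(page))
            goto out_no_isolated;

        mapping = page_mapping(page);
        VM_BUG_ON_PAGE(!mapping, page);

        /* The driver gets the final say via its isolate_page() op. */
        if (!mapping->a_ops->isolate_page(page, mode))
            goto out_no_isolated;

        WARN_ON_ONCE(PageIsolated(page));
        __SetPageIsolated(page);
        unlock_page(page);
        return 0;

    out_no_isolated:
        unlock_page(page);
    out_putpage:
        put_page(page);
    out:
        return -EBUSY;
    }
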
146 void putback_movable_page(struct page *page) in putback_movable_page() argument
150 VM_BUG_ON_PAGE(!PageLocked(page), page); in putback_movable_page()
151 VM_BUG_ON_PAGE(!PageMovable(page), page); in putback_movable_page()
152 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_page()
154 mapping = page_mapping(page); in putback_movable_page()
155 mapping->a_ops->putback_page(page); in putback_movable_page()
156 __ClearPageIsolated(page); in putback_movable_page()
169 struct page *page; in putback_movable_pages() local
170 struct page *page2; in putback_movable_pages()
172 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
173 if (unlikely(PageHuge(page))) { in putback_movable_pages()
174 putback_active_hugepage(page); in putback_movable_pages()
177 list_del(&page->lru); in putback_movable_pages()
183 if (unlikely(__PageMovable(page))) { in putback_movable_pages()
184 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_pages()
185 lock_page(page); in putback_movable_pages()
186 if (PageMovable(page)) in putback_movable_pages()
187 putback_movable_page(page); in putback_movable_pages()
189 __ClearPageIsolated(page); in putback_movable_pages()
190 unlock_page(page); in putback_movable_pages()
191 put_page(page); in putback_movable_pages()
193 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in putback_movable_pages()
194 page_is_file_cache(page), -hpage_nr_pages(page)); in putback_movable_pages()
195 putback_lru_page(page); in putback_movable_pages()
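
putback_movable_pages() is the undo path for a failed migration list: hugetlb pages return via putback_active_hugepage(), driver-owned movable pages via putback_movable_page() (rechecked under the page lock, since the driver may have released the page meanwhile), and ordinary pages go back to the LRU with the NR_ISOLATED accounting reversed. A sketch assembled from the matches above, with the elided lines paraphrased in comments:

    void putback_movable_pages(struct list_head *l)
    {
        struct page *page;
        struct page *page2;

        list_for_each_entry_safe(page, page2, l, lru) {
            if (unlikely(PageHuge(page))) {
                putback_active_hugepage(page);
                continue;
            }
            list_del(&page->lru);
            if (unlikely(__PageMovable(page))) {
                VM_BUG_ON_PAGE(!PageIsolated(page), page);
                lock_page(page);
                if (PageMovable(page))
                    putback_movable_page(page);
                else
                    /* driver dropped it: just clear our marker */
                    __ClearPageIsolated(page);
                unlock_page(page);
                put_page(page);
            } else {
                mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                        page_is_file_cache(page), -hpage_nr_pages(page));
                putback_lru_page(page);
            }
        }
    }
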
203 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, in remove_migration_pte() argument
207 .page = old, in remove_migration_pte()
212 struct page *new; in remove_migration_pte()
216 VM_BUG_ON_PAGE(PageTail(page), page); in remove_migration_pte()
218 if (PageKsm(page)) in remove_migration_pte()
219 new = page; in remove_migration_pte()
221 new = page - pvmw.page->index + in remove_migration_pte()
227 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in remove_migration_pte()
278 if (PageTransHuge(page) && PageMlocked(page)) in remove_migration_pte()
279 clear_page_mlock(page); in remove_migration_pte()
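
remove_migration_pte() runs as an rmap-walk callback on the new page while page_vma_mapped_walk() (seeded with .page = old, per line 207) finds the migration entries left behind. For a compound page the matched address can fall in any subpage, so the matching subpage of the new page is recovered by index arithmetic, as at line 221; restated:

    /* KSM pages migrate in place; otherwise pick the matching subpage. */
    if (PageKsm(page))
        new = page;
    else
        new = page - pvmw.page->index +
                linear_page_index(vma, pvmw.address);
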
292 void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes()
315 struct page *page; in __migration_entry_wait() local
326 page = migration_entry_to_page(entry); in __migration_entry_wait()
335 if (!get_page_unless_zero(page)) in __migration_entry_wait()
338 wait_on_page_locked(page); in __migration_entry_wait()
339 put_page(page); in __migration_entry_wait()
364 struct page *page; in pmd_migration_entry_wait() local
369 page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
370 if (!get_page_unless_zero(page)) in pmd_migration_entry_wait()
373 wait_on_page_locked(page); in pmd_migration_entry_wait()
374 put_page(page); in pmd_migration_entry_wait()
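
Both wait helpers share one idea: decode the migration entry back to the in-flight page, pin it, drop the page-table lock, and sleep on the page lock that the migration path holds until it is done. A sketch of the PMD variant, assuming the v4.19-era helpers pmd_lock() and is_pmd_migration_entry():

    void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
    {
        spinlock_t *ptl;
        struct page *page;

        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
            goto unlock;
        page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
        if (!get_page_unless_zero(page))
            goto unlock;
        spin_unlock(ptl);
        wait_on_page_locked(page);  /* released when migration finishes */
        put_page(page);
        return;
    unlock:
        spin_unlock(ptl);
    }
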
440 struct page *newpage, struct page *page, in migrate_page_move_mapping() argument
453 expected_count += is_device_private_page(page); in migrate_page_move_mapping()
454 expected_count += is_device_public_page(page); in migrate_page_move_mapping()
458 if (page_count(page) != expected_count) in migrate_page_move_mapping()
462 newpage->index = page->index; in migrate_page_move_mapping()
463 newpage->mapping = page->mapping; in migrate_page_move_mapping()
464 if (PageSwapBacked(page)) in migrate_page_move_mapping()
470 oldzone = page_zone(page); in migrate_page_move_mapping()
476 page_index(page)); in migrate_page_move_mapping()
478 expected_count += hpage_nr_pages(page) + page_has_private(page); in migrate_page_move_mapping()
479 if (page_count(page) != expected_count || in migrate_page_move_mapping()
481 &mapping->i_pages.xa_lock) != page) { in migrate_page_move_mapping()
486 if (!page_ref_freeze(page, expected_count)) { in migrate_page_move_mapping()
500 page_ref_unfreeze(page, expected_count); in migrate_page_move_mapping()
509 newpage->index = page->index; in migrate_page_move_mapping()
510 newpage->mapping = page->mapping; in migrate_page_move_mapping()
511 page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ in migrate_page_move_mapping()
512 if (PageSwapBacked(page)) { in migrate_page_move_mapping()
514 if (PageSwapCache(page)) { in migrate_page_move_mapping()
516 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
519 VM_BUG_ON_PAGE(PageSwapCache(page), page); in migrate_page_move_mapping()
523 dirty = PageDirty(page); in migrate_page_move_mapping()
525 ClearPageDirty(page); in migrate_page_move_mapping()
530 if (PageTransHuge(page)) { in migrate_page_move_mapping()
532 int index = page_index(page); in migrate_page_move_mapping()
547 page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); in migrate_page_move_mapping()
565 if (PageSwapBacked(page) && !PageSwapCache(page)) { in migrate_page_move_mapping()
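
The heart of migrate_page_move_mapping() is a refcount freeze: the expected count (cache references plus private data, plus the device-page extras at lines 453-454) must account for every reference, the count is frozen to 0 so nobody can grab the page mid-swap, the radix-tree slot is pointed at the new page, and the count is unfrozen minus the cache references that just moved. A condensed fragment of the pagecache path under the v4.19 radix-tree API; error unwinding, the THP tail-slot loop, and the zone/node statistics update are elided:

    xa_lock_irq(&mapping->i_pages);

    pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));

    expected_count += hpage_nr_pages(page) + page_has_private(page);
    if (page_count(page) != expected_count ||
        radix_tree_deref_slot_protected(pslot,
                &mapping->i_pages.xa_lock) != page) {
        xa_unlock_irq(&mapping->i_pages);
        return -EAGAIN;
    }

    if (!page_ref_freeze(page, expected_count)) {
        /* a new reference appeared between the check and the freeze */
        xa_unlock_irq(&mapping->i_pages);
        return -EAGAIN;
    }

    /* The new page inherits index, mapping and the cache references. */
    newpage->index = page->index;
    newpage->mapping = page->mapping;
    page_ref_add(newpage, hpage_nr_pages(page));
    if (PageSwapBacked(page))
        __SetPageSwapBacked(newpage);

    radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);

    /* The old page keeps only the caller-visible references. */
    page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
    xa_unlock_irq(&mapping->i_pages);
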
587 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
594 pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
596 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
597 if (page_count(page) != expected_count || in migrate_huge_page_move_mapping()
598 radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) { in migrate_huge_page_move_mapping()
603 if (!page_ref_freeze(page, expected_count)) { in migrate_huge_page_move_mapping()
608 newpage->index = page->index; in migrate_huge_page_move_mapping()
609 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
615 page_ref_unfreeze(page, expected_count - 1); in migrate_huge_page_move_mapping()
627 static void __copy_gigantic_page(struct page *dst, struct page *src, in __copy_gigantic_page()
631 struct page *dst_base = dst; in __copy_gigantic_page()
632 struct page *src_base = src; in __copy_gigantic_page()
644 static void copy_huge_page(struct page *dst, struct page *src) in copy_huge_page()
673 void migrate_page_states(struct page *newpage, struct page *page) in migrate_page_states() argument
677 if (PageError(page)) in migrate_page_states()
679 if (PageReferenced(page)) in migrate_page_states()
681 if (PageUptodate(page)) in migrate_page_states()
683 if (TestClearPageActive(page)) { in migrate_page_states()
684 VM_BUG_ON_PAGE(PageUnevictable(page), page); in migrate_page_states()
686 } else if (TestClearPageUnevictable(page)) in migrate_page_states()
688 if (PageChecked(page)) in migrate_page_states()
690 if (PageMappedToDisk(page)) in migrate_page_states()
694 if (PageDirty(page)) in migrate_page_states()
697 if (page_is_young(page)) in migrate_page_states()
699 if (page_is_idle(page)) in migrate_page_states()
706 cpupid = page_cpupid_xchg_last(page, -1); in migrate_page_states()
709 ksm_migrate_page(newpage, page); in migrate_page_states()
714 if (PageSwapCache(page)) in migrate_page_states()
715 ClearPageSwapCache(page); in migrate_page_states()
716 ClearPagePrivate(page); in migrate_page_states()
717 set_page_private(page, 0); in migrate_page_states()
726 copy_page_owner(page, newpage); in migrate_page_states()
728 mem_cgroup_migrate(page, newpage); in migrate_page_states()
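
migrate_page_states() transfers per-page state wholesale: every flag set on the old page is set on the new one, the NUMA cpupid hint is reset on the old page and carried over, KSM and memcg state follow, and the old page's swapcache/private state is cleared once the new page owns it. An abridged sketch; the flags omitted here (checked, mapped-to-disk, young, idle) follow the identical pattern:

    void migrate_page_states(struct page *newpage, struct page *page)
    {
        int cpupid;

        if (PageError(page))
            SetPageError(newpage);
        if (PageReferenced(page))
            SetPageReferenced(newpage);
        if (PageUptodate(page))
            SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
            VM_BUG_ON_PAGE(PageUnevictable(page), page);
            SetPageActive(newpage);
        } else if (TestClearPageUnevictable(page))
            SetPageUnevictable(newpage);
        if (PageDirty(page))
            SetPageDirty(newpage);

        /* Carry the NUMA-balancing hint; poison the old page's copy. */
        cpupid = page_cpupid_xchg_last(page, -1);
        page_cpupid_xchg_last(newpage, cpupid);

        ksm_migrate_page(newpage, page);

        /* The old page no longer owns swapcache or private state. */
        if (PageSwapCache(page))
            ClearPageSwapCache(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);

        copy_page_owner(page, newpage);
        mem_cgroup_migrate(page, newpage);
    }
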
732 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
734 if (PageHuge(page) || PageTransHuge(page)) in migrate_page_copy()
735 copy_huge_page(newpage, page); in migrate_page_copy()
737 copy_highpage(newpage, page); in migrate_page_copy()
739 migrate_page_states(newpage, page); in migrate_page_copy()
754 struct page *newpage, struct page *page, in migrate_page() argument
759 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ in migrate_page()
761 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); in migrate_page()
767 migrate_page_copy(newpage, page); in migrate_page()
769 migrate_page_states(newpage, page); in migrate_page()
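
migrate_page() is the common fast path for pages with no private state: move the mapping, then either copy data and state, or, for MIGRATE_SYNC_NO_COPY (where a device driver does the data copy itself), move only the state. It is short enough to restate nearly whole, assuming the v4.19 signature visible above:

    int migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page,
            enum migrate_mode mode)
    {
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
            return rc;

        if (mode != MIGRATE_SYNC_NO_COPY)
            migrate_page_copy(newpage, page);
        else
            migrate_page_states(newpage, page);

        return MIGRATEPAGE_SUCCESS;
    }
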
781 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
786 if (!page_has_buffers(page)) in buffer_migrate_page()
787 return migrate_page(mapping, newpage, page, mode); in buffer_migrate_page()
789 head = page_buffers(page); in buffer_migrate_page()
791 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); in buffer_migrate_page()
804 ClearPagePrivate(page); in buffer_migrate_page()
805 set_page_private(newpage, page_private(page)); in buffer_migrate_page()
806 set_page_private(page, 0); in buffer_migrate_page()
807 put_page(page); in buffer_migrate_page()
820 migrate_page_copy(newpage, page); in buffer_migrate_page()
822 migrate_page_states(newpage, page); in buffer_migrate_page()
840 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
855 if (!clear_page_dirty_for_io(page)) in writeout()
867 remove_migration_ptes(page, page, false); in writeout()
869 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
873 lock_page(page); in writeout()
882 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
884 if (PageDirty(page)) { in fallback_migrate_page()
893 return writeout(mapping, page); in fallback_migrate_page()
900 if (page_has_private(page) && in fallback_migrate_page()
901 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
904 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
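
fallback_migrate_page() handles mappings that provide no migratepage op. A dirty page can only be written out in the fully synchronous modes, and a clean page must shed its buffers before being treated as a plain page. A sketch with the return codes reconstructed:

    static int fallback_migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page, enum migrate_mode mode)
    {
        if (PageDirty(page)) {
            /* Only write back in fully synchronous migration. */
            switch (mode) {
            case MIGRATE_SYNC:
            case MIGRATE_SYNC_NO_COPY:
                break;
            default:
                return -EBUSY;
            }
            return writeout(mapping, page);
        }

        /* Filesystem buffers must be dropped before the page can move. */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
            return -EAGAIN;

        return migrate_page(mapping, newpage, page, mode);
    }
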
918 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
923 bool is_lru = !__PageMovable(page); in move_to_new_page()
925 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
928 mapping = page_mapping(page); in move_to_new_page()
932 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
942 page, mode); in move_to_new_page()
945 page, mode); in move_to_new_page()
951 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
952 if (!PageMovable(page)) { in move_to_new_page()
954 __ClearPageIsolated(page); in move_to_new_page()
959 page, mode); in move_to_new_page()
961 !PageIsolated(page)); in move_to_new_page()
969 if (__PageMovable(page)) { in move_to_new_page()
970 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
976 __ClearPageIsolated(page); in move_to_new_page()
984 if (!PageMappingFlags(page)) in move_to_new_page()
985 page->mapping = NULL; in move_to_new_page()
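
move_to_new_page() is the dispatcher: LRU pages go to the mapping's migratepage op (or to migrate_page()/fallback_migrate_page() when there is none), while non-LRU movable pages always go through their driver's op, with a recheck in case the driver freed the page after isolation. A simplified sketch; the real function folds these branches into one exit path:

    static int move_to_new_page(struct page *newpage, struct page *page,
            enum migrate_mode mode)
    {
        struct address_space *mapping = page_mapping(page);
        bool is_lru = !__PageMovable(page);
        int rc;

        VM_BUG_ON_PAGE(!PageLocked(page), page);

        if (likely(is_lru)) {
            if (!mapping)
                rc = migrate_page(mapping, newpage, page, mode);
            else if (mapping->a_ops->migratepage)
                rc = mapping->a_ops->migratepage(mapping, newpage,
                                                 page, mode);
            else
                rc = fallback_migrate_page(mapping, newpage, page, mode);
        } else {
            /* Driver-owned page: it must still be isolated ... */
            VM_BUG_ON_PAGE(!PageIsolated(page), page);
            if (!PageMovable(page)) {
                /* ... unless the driver already freed it. */
                rc = MIGRATEPAGE_SUCCESS;
                __ClearPageIsolated(page);
            } else {
                rc = mapping->a_ops->migratepage(mapping, newpage,
                                                 page, mode);
            }
        }

        /* On success the old page sheds its mapping state. */
        if (rc == MIGRATEPAGE_SUCCESS && !PageMappingFlags(page))
            page->mapping = NULL;
        return rc;
    }
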
991 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
997 bool is_lru = !__PageMovable(page); in __unmap_and_move()
999 if (!trylock_page(page)) { in __unmap_and_move()
1019 lock_page(page); in __unmap_and_move()
1022 if (PageWriteback(page)) { in __unmap_and_move()
1039 wait_on_page_writeback(page); in __unmap_and_move()
1056 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
1057 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
1071 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1087 if (!page->mapping) { in __unmap_and_move()
1088 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
1089 if (page_has_private(page)) { in __unmap_and_move()
1090 try_to_free_buffers(page); in __unmap_and_move()
1093 } else if (page_mapped(page)) { in __unmap_and_move()
1095 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
1096 page); in __unmap_and_move()
1097 try_to_unmap(page, in __unmap_and_move()
1102 if (!page_mapped(page)) in __unmap_and_move()
1103 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1106 remove_migration_ptes(page, in __unmap_and_move()
1107 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); in __unmap_and_move()
1115 unlock_page(page); in __unmap_and_move()
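
__unmap_and_move() is the per-page engine, and the matches show its shape: take the page lock (async callers refuse to block), wait out writeback only in the sync modes, pin the anon_vma so PTEs can still be rewritten after unmapping, unmap, move, then rewrite the migration entries toward whichever page survived. A condensed fragment; locals, the memcg/balloon details, and the unwind labels are elided or reconstructed:

    if (!trylock_page(page)) {
        if (!force || mode == MIGRATE_ASYNC)
            goto out;               /* must not block */
        lock_page(page);
    }

    if (PageWriteback(page)) {
        switch (mode) {
        case MIGRATE_SYNC:
        case MIGRATE_SYNC_NO_COPY:
            break;                  /* allowed to wait */
        default:
            rc = -EBUSY;
            goto out_unlock;
        }
        if (!force)
            goto out_unlock;
        wait_on_page_writeback(page);
    }

    /* Keep the anon_vma alive across the unmap/remap window. */
    if (PageAnon(page) && !PageKsm(page))
        anon_vma = page_get_anon_vma(page);

    if (!page->mapping) {
        VM_BUG_ON_PAGE(PageAnon(page), page);
        if (page_has_private(page))
            try_to_free_buffers(page);  /* truncated under us */
    } else if (page_mapped(page)) {
        try_to_unmap(page,
                TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
        page_was_mapped = 1;
    }

    if (!page_mapped(page))
        rc = move_to_new_page(newpage, page, mode);

    if (page_was_mapped)
        remove_migration_ptes(page,
                rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
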
1150 unsigned long private, struct page *page, in unmap_and_move() argument
1155 struct page *newpage; in unmap_and_move()
1157 if (!thp_migration_supported() && PageTransHuge(page)) in unmap_and_move()
1160 newpage = get_new_page(page, private); in unmap_and_move()
1164 if (page_count(page) == 1) { in unmap_and_move()
1166 ClearPageActive(page); in unmap_and_move()
1167 ClearPageUnevictable(page); in unmap_and_move()
1168 if (unlikely(__PageMovable(page))) { in unmap_and_move()
1169 lock_page(page); in unmap_and_move()
1170 if (!PageMovable(page)) in unmap_and_move()
1171 __ClearPageIsolated(page); in unmap_and_move()
1172 unlock_page(page); in unmap_and_move()
1181 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
1193 list_del(&page->lru); in unmap_and_move()
1200 if (likely(!__PageMovable(page))) in unmap_and_move()
1201 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in unmap_and_move()
1202 page_is_file_cache(page), -hpage_nr_pages(page)); in unmap_and_move()
1211 put_page(page); in unmap_and_move()
1218 if (set_hwpoison_free_buddy_page(page)) in unmap_and_move()
1223 if (likely(!__PageMovable(page))) { in unmap_and_move()
1224 putback_lru_page(page); in unmap_and_move()
1228 lock_page(page); in unmap_and_move()
1229 if (PageMovable(page)) in unmap_and_move()
1230 putback_movable_page(page); in unmap_and_move()
1232 __ClearPageIsolated(page); in unmap_and_move()
1233 unlock_page(page); in unmap_and_move()
1234 put_page(page); in unmap_and_move()
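
unmap_and_move() wraps the engine with allocation and disposal. Two details stand out in the matches: a THP is refused with -ENOMEM when THP migration is unsupported so the caller can split it and retry, and a page whose count has dropped to 1 was already freed, so it is only unmarked, not migrated. On success the NR_ISOLATED counters are corrected and, for memory-failure migrations, the freed source page can be taken out of the buddy allocator via set_hwpoison_free_buddy_page(). A skeleton of that logic, with the cleanup paths compressed into comments:

    static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
            unsigned long private, struct page *page,
            int force, enum migrate_mode mode,
            enum migrate_reason reason)
    {
        int rc = MIGRATEPAGE_SUCCESS;
        struct page *newpage;

        if (!thp_migration_supported() && PageTransHuge(page))
            return -ENOMEM;         /* caller splits and retries */

        newpage = get_new_page(page, private);
        if (!newpage)
            return -ENOMEM;

        if (page_count(page) == 1) {
            /* Freed under us: nothing to migrate, just unmark it. */
            ClearPageActive(page);
            ClearPageUnevictable(page);
            if (unlikely(__PageMovable(page))) {
                lock_page(page);
                if (!PageMovable(page))
                    __ClearPageIsolated(page);
                unlock_page(page);
            }
            /* ... release newpage and fall through to cleanup ... */
        } else {
            rc = __unmap_and_move(page, newpage, force, mode);
        }

        /* ... on success: list_del(), fix NR_ISOLATED, put_page();
         *     on failure: put back to the LRU or to the driver,
         *     as at lines 1223-1234 above ... */
        return rc;
    }
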
1266 struct page *hpage, int force, in unmap_and_move_huge_page()
1271 struct page *new_hpage; in unmap_and_move_huge_page()
1380 struct page *page; in migrate_pages() local
1381 struct page *page2; in migrate_pages()
1391 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1395 if (PageHuge(page)) in migrate_pages()
1397 put_new_page, private, page, in migrate_pages()
1401 private, page, pass > 2, mode, in migrate_pages()
1417 if (PageTransHuge(page) && !PageHuge(page)) { in migrate_pages()
1418 lock_page(page); in migrate_pages()
1419 rc = split_huge_page_to_list(page, from); in migrate_pages()
1420 unlock_page(page); in migrate_pages()
1422 list_safe_reset_next(page, page2, lru); in migrate_pages()
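
migrate_pages() drives the whole list with up to ten passes, forcing (pass > 2) on the later ones. The notable case in the matches is -ENOMEM on a THP: the page is split back onto the source list and the resulting base pages are retried individually. A paraphrased core loop; the bookkeeping and remaining switch arms are compressed into comments:

    for (pass = 0; pass < 10 && retry; pass++) {
        retry = 0;
        list_for_each_entry_safe(page, page2, from, lru) {
            if (PageHuge(page))
                rc = unmap_and_move_huge_page(get_new_page,
                        put_new_page, private, page,
                        pass > 2, mode, reason);
            else
                rc = unmap_and_move(get_new_page, put_new_page,
                        private, page, pass > 2, mode, reason);

            switch (rc) {
            case -ENOMEM:
                /* THP (but not hugetlb): split it and retry the
                 * pieces as base pages. */
                if (PageTransHuge(page) && !PageHuge(page)) {
                    lock_page(page);
                    rc = split_huge_page_to_list(page, from);
                    unlock_page(page);
                    if (!rc)
                        list_safe_reset_next(page, page2, lru);
                }
                /* otherwise count it failed and stop early */
                break;
            case -EAGAIN:
                retry++;            /* try again next pass */
                break;
            case MIGRATEPAGE_SUCCESS:
                /* nr_succeeded++ */
                break;
            default:
                /* permanent failure for this page */
                break;
            }
        }
    }
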
1500 struct page *page; in add_page_for_migration() local
1512 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1514 err = PTR_ERR(page); in add_page_for_migration()
1515 if (IS_ERR(page)) in add_page_for_migration()
1519 if (!page) in add_page_for_migration()
1523 if (page_to_nid(page) == node) in add_page_for_migration()
1527 if (page_mapcount(page) > 1 && !migrate_all) in add_page_for_migration()
1530 if (PageHuge(page)) { in add_page_for_migration()
1531 if (PageHead(page)) { in add_page_for_migration()
1532 isolate_huge_page(page, pagelist); in add_page_for_migration()
1536 struct page *head; in add_page_for_migration()
1538 head = compound_head(page); in add_page_for_migration()
1555 put_page(page); in add_page_for_migration()
1664 struct page *page; in do_pages_stat_array() local
1672 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1674 err = PTR_ERR(page); in do_pages_stat_array()
1675 if (IS_ERR(page)) in do_pages_stat_array()
1678 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
1846 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
1850 struct page *newpage; in alloc_misplaced_dst_page()
1861 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
1865 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
1868 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) in numamigrate_isolate_page()
1871 if (isolate_lru_page(page)) in numamigrate_isolate_page()
1881 if (PageTransHuge(page) && page_count(page) != 3) { in numamigrate_isolate_page()
1882 putback_lru_page(page); in numamigrate_isolate_page()
1886 page_lru = page_is_file_cache(page); in numamigrate_isolate_page()
1887 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
1888 hpage_nr_pages(page)); in numamigrate_isolate_page()
1895 put_page(page); in numamigrate_isolate_page()
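
numamigrate_isolate_page() gates NUMA-balancing migration: refuse if the target node is nearly full, isolate from the LRU, and for a THP insist on page_count() == 3, which is exactly one mapping plus the caller's pin plus the isolation reference; anything more means a pin (such as GUP) that must block migration. A near-verbatim sketch:

    static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
    {
        int page_lru;

        VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

        /* Avoid migrating to a node that is nearly full. */
        if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
            return 0;

        if (isolate_lru_page(page))
            return 0;

        /*
         * Expected count for a mapped THP is 3: one for the mapping,
         * one for the caller's pin, one from isolate_lru_page().
         */
        if (PageTransHuge(page) && page_count(page) != 3) {
            putback_lru_page(page);
            return 0;
        }

        page_lru = page_is_file_cache(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
                hpage_nr_pages(page));

        /* Isolation holds its own reference; drop the caller's. */
        put_page(page);
        return 1;
    }
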
1901 struct page *page = pmd_page(pmd); in pmd_trans_migrating() local
1902 return PageLocked(page); in pmd_trans_migrating()
1910 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
1922 if (page_mapcount(page) != 1 && page_is_file_cache(page) && in migrate_misplaced_page()
1930 if (page_is_file_cache(page) && PageDirty(page)) in migrate_misplaced_page()
1933 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
1937 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
1943 list_del(&page->lru); in migrate_misplaced_page()
1944 dec_node_page_state(page, NR_ISOLATED_ANON + in migrate_misplaced_page()
1945 page_is_file_cache(page)); in migrate_misplaced_page()
1946 putback_lru_page(page); in migrate_misplaced_page()
1955 put_page(page); in migrate_misplaced_page()
1969 struct page *page, int node) in migrate_misplaced_transhuge_page() argument
1974 struct page *new_page = NULL; in migrate_misplaced_transhuge_page()
1975 int page_lru = page_is_file_cache(page); in migrate_misplaced_transhuge_page()
1986 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_transhuge_page()
1994 if (PageSwapBacked(page)) in migrate_misplaced_transhuge_page()
1998 new_page->mapping = page->mapping; in migrate_misplaced_transhuge_page()
1999 new_page->index = page->index; in migrate_misplaced_transhuge_page()
2000 migrate_page_copy(new_page, page); in migrate_misplaced_transhuge_page()
2006 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { in migrate_misplaced_transhuge_page()
2012 SetPageActive(page); in migrate_misplaced_transhuge_page()
2014 SetPageUnevictable(page); in migrate_misplaced_transhuge_page()
2020 get_page(page); in migrate_misplaced_transhuge_page()
2021 putback_lru_page(page); in migrate_misplaced_transhuge_page()
2022 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2044 page_ref_unfreeze(page, 2); in migrate_misplaced_transhuge_page()
2045 mlock_migrate_page(new_page, page); in migrate_misplaced_transhuge_page()
2046 page_remove_rmap(page, true); in migrate_misplaced_transhuge_page()
2061 unlock_page(page); in migrate_misplaced_transhuge_page()
2062 put_page(page); /* Drop the rmap reference */ in migrate_misplaced_transhuge_page()
2063 put_page(page); /* Drop the LRU isolation reference */ in migrate_misplaced_transhuge_page()
2068 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2084 unlock_page(page); in migrate_misplaced_transhuge_page()
2085 put_page(page); in migrate_misplaced_transhuge_page()
2152 struct page *page; in migrate_vma_collect_pmd() local
2160 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
2161 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
2170 get_page(page); in migrate_vma_collect_pmd()
2172 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
2175 ret = split_huge_page(page); in migrate_vma_collect_pmd()
2176 unlock_page(page); in migrate_vma_collect_pmd()
2177 put_page(page); in migrate_vma_collect_pmd()
2195 struct page *page; in migrate_vma_collect_pmd() local
2221 page = device_private_entry_to_page(entry); in migrate_vma_collect_pmd()
2222 mpfn = migrate_pfn(page_to_pfn(page))| in migrate_vma_collect_pmd()
2233 page = _vm_normal_page(migrate->vma, addr, pte, true); in migrate_vma_collect_pmd()
2239 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
2243 pfn = page_to_pfn(page); in migrate_vma_collect_pmd()
2254 get_page(page); in migrate_vma_collect_pmd()
2262 if (trylock_page(page)) { in migrate_vma_collect_pmd()
2269 entry = make_migration_entry(page, mpfn & in migrate_vma_collect_pmd()
2281 page_remove_rmap(page, false); in migrate_vma_collect_pmd()
2282 put_page(page); in migrate_vma_collect_pmd()
2342 static bool migrate_vma_check_page(struct page *page) in migrate_vma_check_page() argument
2356 if (PageCompound(page)) in migrate_vma_check_page()
2360 if (is_zone_device_page(page)) { in migrate_vma_check_page()
2374 if (is_device_private_page(page)) in migrate_vma_check_page()
2381 if (!is_device_public_page(page)) in migrate_vma_check_page()
2387 if (page_mapping(page)) in migrate_vma_check_page()
2388 extra += 1 + page_has_private(page); in migrate_vma_check_page()
2390 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
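
migrate_vma_check_page() decides whether anything besides known bookkeeping pins the page: one reference for the migration code itself, one more for ZONE_DEVICE public pages, and 1 + page_has_private() for a pagecache mapping; if page_count() exceeds page_mapcount() by more than that, someone holds an unexpected pin. A sketch under the v4.19-era device-private/device-public split:

    static bool migrate_vma_check_page(struct page *page)
    {
        int extra = 1;              /* migrate_vma's own reference */

        if (PageCompound(page))     /* THP not handled here */
            return false;

        if (is_zone_device_page(page)) {
            /* device-private pages cannot be GUP-pinned */
            if (is_device_private_page(page))
                return true;
            if (!is_device_public_page(page))
                return false;
            extra++;                /* ZONE_DEVICE holds one extra */
        }

        if (page_mapping(page))
            extra += 1 + page_has_private(page);

        /* Any reference beyond the mappings is an unexpected pin. */
        if ((page_count(page) - extra) > page_mapcount(page))
            return false;

        return true;
    }
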
2415 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2418 if (!page) in migrate_vma_prepare()
2430 if (!trylock_page(page)) { in migrate_vma_prepare()
2433 put_page(page); in migrate_vma_prepare()
2441 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2442 if (!PageLRU(page) && allow_drain) { in migrate_vma_prepare()
2448 if (isolate_lru_page(page)) { in migrate_vma_prepare()
2455 unlock_page(page); in migrate_vma_prepare()
2457 put_page(page); in migrate_vma_prepare()
2463 put_page(page); in migrate_vma_prepare()
2466 if (!migrate_vma_check_page(page)) { in migrate_vma_prepare()
2472 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2473 get_page(page); in migrate_vma_prepare()
2474 putback_lru_page(page); in migrate_vma_prepare()
2478 unlock_page(page); in migrate_vma_prepare()
2481 if (!is_zone_device_page(page)) in migrate_vma_prepare()
2482 putback_lru_page(page); in migrate_vma_prepare()
2484 put_page(page); in migrate_vma_prepare()
2490 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2492 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_prepare()
2495 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2498 unlock_page(page); in migrate_vma_prepare()
2499 put_page(page); in migrate_vma_prepare()
2523 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2525 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2528 if (page_mapped(page)) { in migrate_vma_unmap()
2529 try_to_unmap(page, flags); in migrate_vma_unmap()
2530 if (page_mapped(page)) in migrate_vma_unmap()
2534 if (migrate_vma_check_page(page)) in migrate_vma_unmap()
2544 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2546 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2549 remove_migration_ptes(page, page, false); in migrate_vma_unmap()
2552 unlock_page(page); in migrate_vma_unmap()
2555 if (is_zone_device_page(page)) in migrate_vma_unmap()
2556 put_page(page); in migrate_vma_unmap()
2558 putback_lru_page(page); in migrate_vma_unmap()
2564 struct page *page, in migrate_vma_insert_page() argument
2617 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) in migrate_vma_insert_page()
2625 __SetPageUptodate(page); in migrate_vma_insert_page()
2627 if (is_zone_device_page(page)) { in migrate_vma_insert_page()
2628 if (is_device_private_page(page)) { in migrate_vma_insert_page()
2631 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2633 } else if (is_device_public_page(page)) { in migrate_vma_insert_page()
2634 entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in migrate_vma_insert_page()
2640 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2652 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2658 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2668 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2673 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2674 mem_cgroup_commit_charge(page, memcg, false, false); in migrate_vma_insert_page()
2675 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
2676 lru_cache_add_active_or_unevictable(page, vma); in migrate_vma_insert_page()
2677 get_page(page); in migrate_vma_insert_page()
2716 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
2717 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages() local
2726 if (!page) { in migrate_vma_pages()
2743 mapping = page_mapping(page); in migrate_vma_pages()
2765 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); in migrate_vma_pages()
2797 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
2798 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize() local
2800 if (!page) { in migrate_vma_finalize()
2813 newpage = page; in migrate_vma_finalize()
2816 remove_migration_ptes(page, newpage, false); in migrate_vma_finalize()
2817 unlock_page(page); in migrate_vma_finalize()
2820 if (is_zone_device_page(page)) in migrate_vma_finalize()
2821 put_page(page); in migrate_vma_finalize()
2823 putback_lru_page(page); in migrate_vma_finalize()
2825 if (newpage != page) { in migrate_vma_finalize()
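
Taken together, the migrate_vma_* helpers above are the phases of one pipeline, driven by migrate_vma() in this era with the caller's alloc_and_copy/finalize_and_map callbacks between phases. An outline of the order as implied by the matches; the phase names are real, the comments paraphrase:

    migrate_vma_collect(&migrate);  /* walk PTEs, record migratable pfns    */
    migrate_vma_prepare(&migrate);  /* trylock pages, isolate from the LRU, */
                                    /* drop any that fail check_page()      */
    migrate_vma_unmap(&migrate);    /* unmap; restore pages still pinned    */
    /* caller allocates destination pages and copies the data */
    migrate_vma_pages(&migrate);    /* move mappings (or insert fresh anon  */
                                    /* pages via migrate_vma_insert_page()) */
    /* caller finalizes device-side state */
    migrate_vma_finalize(&migrate); /* point PTEs at the winner, drop refs  */
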