Lines matching refs:page (struct page references in mm/migrate.c). Each entry gives the source line number, the matching line, and the enclosing function; the "argument" and "local" suffixes mark matches where page is a function parameter or a local variable.

85 int isolate_movable_page(struct page *page, isolate_mode_t mode)  in isolate_movable_page()  argument
98 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
106 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
119 if (unlikely(!trylock_page(page))) in isolate_movable_page()
122 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
125 mapping = page_mapping(page); in isolate_movable_page()
126 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
128 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
132 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
133 __SetPageIsolated(page); in isolate_movable_page()
134 unlock_page(page); in isolate_movable_page()
139 unlock_page(page); in isolate_movable_page()
141 put_page(page); in isolate_movable_page()
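
The block above (source lines 85-141) is the isolation handshake for driver-managed movable pages: take a speculative reference, re-check __PageMovable under that reference, trylock so the compaction path never sleeps, and only then ask the driver through a_ops->isolate_page. A condensed sketch of that flow, reconstructed from the matched lines and mm/migrate.c of this era (~v5.4); the labels and comments are mine, not verbatim source:

    int isolate_movable_page(struct page *page, isolate_mode_t mode)
    {
        struct address_space *mapping;

        /* Speculative ref: the page may be freed under us. */
        if (unlikely(!get_page_unless_zero(page)))
            goto out;
        /* Re-check now that the reference pins the page. */
        if (unlikely(!__PageMovable(page)))
            goto out_putpage;
        /* Never sleep on the page lock here (compaction path). */
        if (unlikely(!trylock_page(page)))
            goto out_putpage;
        /* Raced with the driver freeing it, or another isolation. */
        if (!PageMovable(page) || PageIsolated(page))
            goto out_no_isolated;

        mapping = page_mapping(page);
        if (!mapping->a_ops->isolate_page(page, mode))
            goto out_no_isolated;       /* driver veto */

        WARN_ON_ONCE(PageIsolated(page));
        __SetPageIsolated(page);
        unlock_page(page);
        return 0;

    out_no_isolated:
        unlock_page(page);
    out_putpage:
        put_page(page);
    out:
        return -EBUSY;
    }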
147 void putback_movable_page(struct page *page) in putback_movable_page() argument
151 VM_BUG_ON_PAGE(!PageLocked(page), page); in putback_movable_page()
152 VM_BUG_ON_PAGE(!PageMovable(page), page); in putback_movable_page()
153 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_page()
155 mapping = page_mapping(page); in putback_movable_page()
156 mapping->a_ops->putback_page(page); in putback_movable_page()
157 __ClearPageIsolated(page); in putback_movable_page()
170 struct page *page; in putback_movable_pages() local
171 struct page *page2; in putback_movable_pages()
173 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
174 if (unlikely(PageHuge(page))) { in putback_movable_pages()
175 putback_active_hugepage(page); in putback_movable_pages()
178 list_del(&page->lru); in putback_movable_pages()
184 if (unlikely(__PageMovable(page))) { in putback_movable_pages()
185 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_pages()
186 lock_page(page); in putback_movable_pages()
187 if (PageMovable(page)) in putback_movable_pages()
188 putback_movable_page(page); in putback_movable_pages()
190 __ClearPageIsolated(page); in putback_movable_pages()
191 unlock_page(page); in putback_movable_pages()
192 put_page(page); in putback_movable_pages()
194 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in putback_movable_pages()
195 page_is_file_cache(page), -hpage_nr_pages(page)); in putback_movable_pages()
196 putback_lru_page(page); in putback_movable_pages()
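
putback_movable_pages() undoes isolation for a whole list, and the matched lines show the three cases it distinguishes: hugetlb pages go back through their own active list, driver-movable pages go through the driver's putback_page hook under the page lock, and ordinary pages return to the LRU with the NR_ISOLATED counters corrected. A sketch of the loop (reconstruction, not verbatim):

    void putback_movable_pages(struct list_head *l)
    {
        struct page *page;
        struct page *page2;

        list_for_each_entry_safe(page, page2, l, lru) {
            if (unlikely(PageHuge(page))) {
                putback_active_hugepage(page);  /* hugetlb's own list */
                continue;
            }
            list_del(&page->lru);
            if (unlikely(__PageMovable(page))) {
                /* Driver page: put back under the page lock, unless the
                 * driver already released it (PageMovable cleared). */
                VM_BUG_ON_PAGE(!PageIsolated(page), page);
                lock_page(page);
                if (PageMovable(page))
                    putback_movable_page(page); /* a_ops->putback_page */
                else
                    __ClearPageIsolated(page);
                unlock_page(page);
                put_page(page);
            } else {
                mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                        page_is_file_cache(page), -hpage_nr_pages(page));
                putback_lru_page(page);
            }
        }
    }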
204 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, in remove_migration_pte() argument
208 .page = old, in remove_migration_pte()
213 struct page *new; in remove_migration_pte()
217 VM_BUG_ON_PAGE(PageTail(page), page); in remove_migration_pte()
219 if (PageKsm(page)) in remove_migration_pte()
220 new = page; in remove_migration_pte()
222 new = page - pvmw.page->index + in remove_migration_pte()
228 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in remove_migration_pte()
275 if (PageTransHuge(page) && PageMlocked(page)) in remove_migration_pte()
276 clear_page_mlock(page); in remove_migration_pte()
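
remove_migration_pte() is the rmap callback that turns a migration entry back into a real PTE; the .page = old initializer makes the walk follow the old page's mappings while the new PTE points at the replacement, and the index arithmetic picks the matching subpage of a THP. A sketch of the anon, non-hugetlb, non-device path only (soft-dirty, mlock and PMD-level branches omitted):

    static bool remove_migration_pte(struct page *page,
            struct vm_area_struct *vma, unsigned long addr, void *old)
    {
        struct page_vma_mapped_walk pvmw = {
            .page = old,            /* walk the *old* page's mappings */
            .vma = vma,
            .address = addr,
            .flags = PVMW_SYNC | PVMW_MIGRATION,
        };
        struct page *new;
        pte_t pte;

        VM_BUG_ON_PAGE(PageTail(page), page);
        while (page_vma_mapped_walk(&pvmw)) {
            if (PageKsm(page))
                new = page;         /* KSM pages map 1:1 */
            else                    /* select the matching THP subpage */
                new = page - pvmw.page->index +
                        linear_page_index(vma, pvmw.address);

            get_page(new);
            pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
            if (is_write_migration_entry(pte_to_swp_entry(*pvmw.pte)))
                pte = maybe_mkwrite(pte, vma);  /* restore write access */
            set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

            if (PageAnon(new))
                page_add_anon_rmap(new, vma, pvmw.address, false);
            else
                page_add_file_rmap(new, false);
            update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }
        return true;
    }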
289 void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes()
312 struct page *page; in __migration_entry_wait() local
323 page = migration_entry_to_page(entry); in __migration_entry_wait()
330 if (!get_page_unless_zero(page)) in __migration_entry_wait()
333 put_and_wait_on_page_locked(page); in __migration_entry_wait()
358 struct page *page; in pmd_migration_entry_wait() local
363 page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
364 if (!get_page_unless_zero(page)) in pmd_migration_entry_wait()
367 put_and_wait_on_page_locked(page); in pmd_migration_entry_wait()
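
Both waiters resolve the migration swap entry back to the page being migrated, take a reference with get_page_unless_zero() (the count may already be frozen to zero by the migrator), and sleep until the page is unlocked; put_and_wait_on_page_locked() drops the reference before sleeping so the waiter cannot itself block the migration. A sketch of the PTE-level variant (~v5.4, reconstruction):

    void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                spinlock_t *ptl)
    {
        pte_t pte;
        swp_entry_t entry;
        struct page *page;

        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))          /* entry already replaced */
            goto out;
        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
            goto out;

        page = migration_entry_to_page(entry);
        /* The count may already be frozen to zero; if so, just return
         * and let the fault retry. */
        if (!get_page_unless_zero(page))
            goto out;
        pte_unmap_unlock(ptep, ptl);
        put_and_wait_on_page_locked(page);  /* drops ref before sleeping */
        return;
    out:
        pte_unmap_unlock(ptep, ptl);
    }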
374 static int expected_page_refs(struct address_space *mapping, struct page *page) in expected_page_refs() argument
382 expected_count += is_device_private_page(page); in expected_page_refs()
384 expected_count += hpage_nr_pages(page) + page_has_private(page); in expected_page_refs()
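
expected_page_refs() states the invariant the whole file relies on: a migratable page holds exactly one reference for the caller, one per subpage from the page cache, one for private data (buffer_heads), and one extra pgmap reference if it is device private; any surplus is a pin that must abort the move. The helper is short enough to restate (reconstruction):

    static int expected_page_refs(struct address_space *mapping,
                                  struct page *page)
    {
        int expected_count = 1;         /* the migration caller's ref */

        /* ZONE_DEVICE private pages carry an extra pgmap reference. */
        expected_count += is_device_private_page(page);
        if (mapping)                    /* one per subpage, plus buffers */
            expected_count += hpage_nr_pages(page) + page_has_private(page);
        return expected_count;
    }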
398 struct page *newpage, struct page *page, int extra_count) in migrate_page_move_mapping() argument
400 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_page_move_mapping()
403 int expected_count = expected_page_refs(mapping, page) + extra_count; in migrate_page_move_mapping()
407 if (page_count(page) != expected_count) in migrate_page_move_mapping()
411 newpage->index = page->index; in migrate_page_move_mapping()
412 newpage->mapping = page->mapping; in migrate_page_move_mapping()
413 if (PageSwapBacked(page)) in migrate_page_move_mapping()
419 oldzone = page_zone(page); in migrate_page_move_mapping()
423 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_page_move_mapping()
428 if (!page_ref_freeze(page, expected_count)) { in migrate_page_move_mapping()
437 newpage->index = page->index; in migrate_page_move_mapping()
438 newpage->mapping = page->mapping; in migrate_page_move_mapping()
439 page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ in migrate_page_move_mapping()
440 if (PageSwapBacked(page)) { in migrate_page_move_mapping()
442 if (PageSwapCache(page)) { in migrate_page_move_mapping()
444 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
447 VM_BUG_ON_PAGE(PageSwapCache(page), page); in migrate_page_move_mapping()
451 dirty = PageDirty(page); in migrate_page_move_mapping()
453 ClearPageDirty(page); in migrate_page_move_mapping()
458 if (PageTransHuge(page)) { in migrate_page_move_mapping()
472 page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); in migrate_page_move_mapping()
490 if (PageSwapBacked(page) && !PageSwapCache(page)) { in migrate_page_move_mapping()
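
This is the core cache switch: under the i_pages lock, verify no one gained a reference, freeze the refcount at exactly expected_count, rewire the xarray slot(s) to newpage, transfer the index/mapping/swap-cache identity, then unfreeze with the cache references now owned by newpage. A condensed sketch (for THP one slot is stored per subpage; the stats update after the unlock is omitted):

    XA_STATE(xas, &mapping->i_pages, page_index(page));
    int expected_count = expected_page_refs(mapping, page) + extra_count;

    if (!mapping) {
        /* Anonymous page outside the swap cache: nothing to rewire. */
        if (page_count(page) != expected_count)
            return -EAGAIN;
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
            __SetPageSwapBacked(newpage);
        return MIGRATEPAGE_SUCCESS;
    }

    xas_lock_irq(&xas);
    if (page_count(page) != expected_count || xas_load(&xas) != page) {
        xas_unlock_irq(&xas);
        return -EAGAIN;             /* raced: extra ref or slot changed */
    }
    if (!page_ref_freeze(page, expected_count)) {
        xas_unlock_irq(&xas);
        return -EAGAIN;
    }
    /* The new page takes over identity and the cache references. */
    newpage->index = page->index;
    newpage->mapping = page->mapping;
    page_ref_add(newpage, hpage_nr_pages(page));
    if (PageSwapBacked(page)) {
        __SetPageSwapBacked(newpage);
        if (PageSwapCache(page)) {
            SetPageSwapCache(newpage);
            set_page_private(newpage, page_private(page));
        }
    }
    xas_store(&xas, newpage);       /* plus one store per THP subpage */
    page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
    xas_unlock(&xas);               /* irqs stay off for the stats update */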
512 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
514 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
518 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
519 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_huge_page_move_mapping()
524 if (!page_ref_freeze(page, expected_count)) { in migrate_huge_page_move_mapping()
529 newpage->index = page->index; in migrate_huge_page_move_mapping()
530 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
536 page_ref_unfreeze(page, expected_count - 1); in migrate_huge_page_move_mapping()
548 static void __copy_gigantic_page(struct page *dst, struct page *src, in __copy_gigantic_page()
552 struct page *dst_base = dst; in __copy_gigantic_page()
553 struct page *src_base = src; in __copy_gigantic_page()
565 static void copy_huge_page(struct page *dst, struct page *src) in copy_huge_page()
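
copy_huge_page() copies subpage by subpage with a cond_resched() between pages, since a huge copy can take a while; gigantic pages additionally cannot assume a contiguous mem_map, which is why __copy_gigantic_page() walks with mem_map_next(). Reconstruction of the helper:

    static void copy_huge_page(struct page *dst, struct page *src)
    {
        int i, nr_pages;

        if (PageHuge(src)) {            /* hugetlbfs page */
            struct hstate *h = page_hstate(src);

            nr_pages = pages_per_huge_page(h);
            if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
                /* Gigantic pages: mem_map may not be contiguous. */
                __copy_gigantic_page(dst, src, nr_pages);
                return;
            }
        } else {                        /* THP */
            VM_BUG_ON_PAGE(!PageTransHuge(src), src);
            nr_pages = hpage_nr_pages(src);
        }

        for (i = 0; i < nr_pages; i++) {
            cond_resched();             /* large copies can take a while */
            copy_highpage(dst + i, src + i);
        }
    }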
594 void migrate_page_states(struct page *newpage, struct page *page) in migrate_page_states() argument
598 if (PageError(page)) in migrate_page_states()
600 if (PageReferenced(page)) in migrate_page_states()
602 if (PageUptodate(page)) in migrate_page_states()
604 if (TestClearPageActive(page)) { in migrate_page_states()
605 VM_BUG_ON_PAGE(PageUnevictable(page), page); in migrate_page_states()
607 } else if (TestClearPageUnevictable(page)) in migrate_page_states()
609 if (PageWorkingset(page)) in migrate_page_states()
611 if (PageChecked(page)) in migrate_page_states()
613 if (PageMappedToDisk(page)) in migrate_page_states()
617 if (PageDirty(page)) in migrate_page_states()
620 if (page_is_young(page)) in migrate_page_states()
622 if (page_is_idle(page)) in migrate_page_states()
629 cpupid = page_cpupid_xchg_last(page, -1); in migrate_page_states()
632 ksm_migrate_page(newpage, page); in migrate_page_states()
637 if (PageSwapCache(page)) in migrate_page_states()
638 ClearPageSwapCache(page); in migrate_page_states()
639 ClearPagePrivate(page); in migrate_page_states()
640 set_page_private(page, 0); in migrate_page_states()
649 copy_page_owner(page, newpage); in migrate_page_states()
651 mem_cgroup_migrate(page, newpage); in migrate_page_states()
655 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
657 if (PageHuge(page) || PageTransHuge(page)) in migrate_page_copy()
658 copy_huge_page(newpage, page); in migrate_page_copy()
660 copy_highpage(newpage, page); in migrate_page_copy()
662 migrate_page_states(newpage, page); in migrate_page_copy()
677 struct page *newpage, struct page *page, in migrate_page() argument
682 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ in migrate_page()
684 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in migrate_page()
690 migrate_page_copy(newpage, page); in migrate_page()
692 migrate_page_states(newpage, page); in migrate_page()
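
migrate_page() is the common migratepage implementation for address spaces with no private data: move the cache slot, then copy. The MIGRATE_SYNC_NO_COPY branch transfers only page state and leaves the data copy to the caller (the device-migration path copies via DMA). Reconstruction:

    int migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page,
            enum migrate_mode mode)
    {
        int rc;

        BUG_ON(PageWriteback(page));    /* writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
            return rc;

        if (mode != MIGRATE_SYNC_NO_COPY)
            migrate_page_copy(newpage, page);   /* states + data */
        else
            migrate_page_states(newpage, page); /* caller copies data */
        return MIGRATEPAGE_SUCCESS;
    }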
737 struct page *newpage, struct page *page, enum migrate_mode mode, in __buffer_migrate_page() argument
744 if (!page_has_buffers(page)) in __buffer_migrate_page()
745 return migrate_page(mapping, newpage, page, mode); in __buffer_migrate_page()
748 expected_count = expected_page_refs(mapping, page); in __buffer_migrate_page()
749 if (page_count(page) != expected_count) in __buffer_migrate_page()
752 head = page_buffers(page); in __buffer_migrate_page()
783 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in __buffer_migrate_page()
787 ClearPagePrivate(page); in __buffer_migrate_page()
788 set_page_private(newpage, page_private(page)); in __buffer_migrate_page()
789 set_page_private(page, 0); in __buffer_migrate_page()
790 put_page(page); in __buffer_migrate_page()
803 migrate_page_copy(newpage, page); in __buffer_migrate_page()
805 migrate_page_states(newpage, page); in __buffer_migrate_page()
827 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
829 return __buffer_migrate_page(mapping, newpage, page, mode, false); in buffer_migrate_page()
840 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page_norefs() argument
842 return __buffer_migrate_page(mapping, newpage, page, mode, true); in buffer_migrate_page_norefs()
849 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
864 if (!clear_page_dirty_for_io(page)) in writeout()
876 remove_migration_ptes(page, page, false); in writeout()
878 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
882 lock_page(page); in writeout()
891 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
893 if (PageDirty(page)) { in fallback_migrate_page()
902 return writeout(mapping, page); in fallback_migrate_page()
909 if (page_has_private(page) && in fallback_migrate_page()
910 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
913 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
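
fallback_migrate_page() handles filesystems with no migratepage callback: a dirty page is either written out (only the blocking MIGRATE_SYNC modes may do that) or fails with -EBUSY, and private buffers must be released before the plain migrate_page() path can run. Reconstruction:

    static int fallback_migrate_page(struct address_space *mapping,
            struct page *newpage, struct page *page, enum migrate_mode mode)
    {
        if (PageDirty(page)) {
            /* Only fully synchronous modes may write back. */
            switch (mode) {
            case MIGRATE_SYNC:
            case MIGRATE_SYNC_NO_COPY:
                break;
            default:
                return -EBUSY;
            }
            return writeout(mapping, page); /* ->writepage, retried later */
        }

        /* Buffers must be dropped before the cache slot can move. */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
            return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

        return migrate_page(mapping, newpage, page, mode);
    }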
927 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
932 bool is_lru = !__PageMovable(page); in move_to_new_page()
934 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
937 mapping = page_mapping(page); in move_to_new_page()
941 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
951 page, mode); in move_to_new_page()
954 page, mode); in move_to_new_page()
960 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
961 if (!PageMovable(page)) { in move_to_new_page()
963 __ClearPageIsolated(page); in move_to_new_page()
968 page, mode); in move_to_new_page()
970 !PageIsolated(page)); in move_to_new_page()
978 if (__PageMovable(page)) { in move_to_new_page()
979 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
985 __ClearPageIsolated(page); in move_to_new_page()
993 if (!PageMappingFlags(page)) in move_to_new_page()
994 page->mapping = NULL; in move_to_new_page()
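
move_to_new_page() is the dispatcher the matched lines outline: LRU pages go through mapping->a_ops->migratepage (or migrate_page()/fallback_migrate_page()), driver-movable pages go through the driver's hook, and on success the source page is disconnected from its mapping. A condensed sketch of the dispatch (error handling trimmed):

    int rc = -EAGAIN;
    struct address_space *mapping;
    bool is_lru = !__PageMovable(page);

    VM_BUG_ON_PAGE(!PageLocked(page), page);
    VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

    mapping = page_mapping(page);
    if (likely(is_lru)) {
        if (!mapping)
            rc = migrate_page(mapping, newpage, page, mode);
        else if (mapping->a_ops->migratepage)
            /* Filesystem callback, e.g. buffer_migrate_page(). */
            rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
        else
            rc = fallback_migrate_page(mapping, newpage, page, mode);
    } else {
        /* Driver-movable page: the driver may have freed it meanwhile. */
        VM_BUG_ON_PAGE(!PageIsolated(page), page);
        if (!PageMovable(page)) {
            rc = MIGRATEPAGE_SUCCESS;
            __ClearPageIsolated(page);
        } else {
            rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
            WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
                    !PageIsolated(page));
        }
    }
    if (rc == MIGRATEPAGE_SUCCESS) {
        if (__PageMovable(page))
            __ClearPageIsolated(page);  /* safe: still under page lock */
        /* Disconnect the old page from its mapping. */
        if (!PageMappingFlags(page))
            page->mapping = NULL;
    }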
1004 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
1010 bool is_lru = !__PageMovable(page); in __unmap_and_move()
1012 if (!trylock_page(page)) { in __unmap_and_move()
1032 lock_page(page); in __unmap_and_move()
1035 if (PageWriteback(page)) { in __unmap_and_move()
1052 wait_on_page_writeback(page); in __unmap_and_move()
1069 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
1070 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
1084 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1100 if (!page->mapping) { in __unmap_and_move()
1101 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
1102 if (page_has_private(page)) { in __unmap_and_move()
1103 try_to_free_buffers(page); in __unmap_and_move()
1106 } else if (page_mapped(page)) { in __unmap_and_move()
1108 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
1109 page); in __unmap_and_move()
1110 try_to_unmap(page, in __unmap_and_move()
1115 if (!page_mapped(page)) in __unmap_and_move()
1116 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1119 remove_migration_ptes(page, in __unmap_and_move()
1120 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); in __unmap_and_move()
1128 unlock_page(page); in __unmap_and_move()
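
__unmap_and_move() is the per-page protocol: lock the page (trylock unless a forced synchronous pass), wait for or bail on writeback, pin the anon_vma so the later rmap walk stays valid, replace every PTE with a migration entry via try_to_unmap(), move the page, then restore PTEs pointing at whichever page survived. A heavily condensed sketch (driver-movable and cleanup branches omitted):

    int rc = -EAGAIN;
    int page_was_mapped = 0;
    struct anon_vma *anon_vma = NULL;

    if (!trylock_page(page)) {
        if (!force || mode == MIGRATE_ASYNC)
            goto out;                   /* never block async compaction */
        lock_page(page);
    }
    if (PageWriteback(page)) {
        /* Only fully synchronous modes may wait for writeback. */
        if (!force || (mode != MIGRATE_SYNC && mode != MIGRATE_SYNC_NO_COPY))
            goto out_unlock;
        wait_on_page_writeback(page);
    }
    /* Pin the anon_vma so the rmap walk after unmapping stays valid. */
    if (PageAnon(page) && !PageKsm(page))
        anon_vma = page_get_anon_vma(page);

    if (unlikely(!trylock_page(newpage)))
        goto out_unlock;

    if (page_mapped(page)) {
        /* Replace every PTE mapping the page with a migration entry. */
        try_to_unmap(page,
                TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
        page_was_mapped = 1;
    }
    if (!page_mapped(page))
        rc = move_to_new_page(newpage, page, mode);
    if (page_was_mapped)
        /* PTEs point at newpage on success, back at page on failure. */
        remove_migration_ptes(page,
                rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

    unlock_page(newpage);
    out_unlock:
    unlock_page(page);
    out:
    if (anon_vma)
        put_anon_vma(anon_vma);
    return rc;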
1166 unsigned long private, struct page *page, in unmap_and_move() argument
1171 struct page *newpage; in unmap_and_move()
1173 if (!thp_migration_supported() && PageTransHuge(page)) in unmap_and_move()
1176 newpage = get_new_page(page, private); in unmap_and_move()
1180 if (page_count(page) == 1) { in unmap_and_move()
1182 ClearPageActive(page); in unmap_and_move()
1183 ClearPageUnevictable(page); in unmap_and_move()
1184 if (unlikely(__PageMovable(page))) { in unmap_and_move()
1185 lock_page(page); in unmap_and_move()
1186 if (!PageMovable(page)) in unmap_and_move()
1187 __ClearPageIsolated(page); in unmap_and_move()
1188 unlock_page(page); in unmap_and_move()
1197 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
1209 list_del(&page->lru); in unmap_and_move()
1216 if (likely(!__PageMovable(page))) in unmap_and_move()
1217 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in unmap_and_move()
1218 page_is_file_cache(page), -hpage_nr_pages(page)); in unmap_and_move()
1227 put_page(page); in unmap_and_move()
1234 if (set_hwpoison_free_buddy_page(page)) in unmap_and_move()
1239 if (likely(!__PageMovable(page))) { in unmap_and_move()
1240 putback_lru_page(page); in unmap_and_move()
1244 lock_page(page); in unmap_and_move()
1245 if (PageMovable(page)) in unmap_and_move()
1246 putback_movable_page(page); in unmap_and_move()
1248 __ClearPageIsolated(page); in unmap_and_move()
1249 unlock_page(page); in unmap_and_move()
1250 put_page(page); in unmap_and_move()
1282 struct page *hpage, int force, in unmap_and_move_huge_page()
1287 struct page *new_hpage; in unmap_and_move_huge_page()
1407 struct page *page; in migrate_pages() local
1408 struct page *page2; in migrate_pages()
1418 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1422 if (PageHuge(page)) in migrate_pages()
1424 put_new_page, private, page, in migrate_pages()
1428 private, page, pass > 2, mode, in migrate_pages()
1444 if (PageTransHuge(page) && !PageHuge(page)) { in migrate_pages()
1445 lock_page(page); in migrate_pages()
1446 rc = split_huge_page_to_list(page, from); in migrate_pages()
1447 unlock_page(page); in migrate_pages()
1449 list_safe_reset_next(page, page2, lru); in migrate_pages()
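
migrate_pages() wraps all of the above in up to ten passes over the list: -EAGAIN re-queues a page for the next pass, -ENOMEM aborts the run, and a THP that fails allocation is split in place (split_huge_page_to_list() puts the tail pages back on 'from') so its base pages can be retried. Sketch of the retry structure (reconstruction):

    int retry = 1, nr_failed = 0, nr_succeeded = 0, pass, rc;
    struct page *page, *page2;

    for (pass = 0; pass < 10 && retry; pass++) {
        retry = 0;
        list_for_each_entry_safe(page, page2, from, lru) {
            cond_resched();
            if (PageHuge(page))
                rc = unmap_and_move_huge_page(get_new_page,
                        put_new_page, private, page,
                        pass > 2, mode, reason);
            else
                rc = unmap_and_move(get_new_page, put_new_page,
                        private, page, pass > 2, mode, reason);

            switch (rc) {
            case -ENOMEM:
                /* A THP may still fit as base pages: split in place
                 * (tails go back on 'from') and continue the pass. */
                if (PageTransHuge(page) && !PageHuge(page)) {
                    lock_page(page);
                    rc = split_huge_page_to_list(page, from);
                    unlock_page(page);
                    if (!rc) {
                        list_safe_reset_next(page, page2, lru);
                        break;
                    }
                }
                nr_failed++;
                goto out;           /* OOM: abort the whole run */
            case -EAGAIN:
                retry++;            /* re-queue for the next pass */
                break;
            case MIGRATEPAGE_SUCCESS:
                nr_succeeded++;
                break;
            default:
                nr_failed++;        /* permanent failure */
                break;
            }
        }
    }
    out:
    nr_failed += retry;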
1527 struct page *page; in add_page_for_migration() local
1539 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1541 err = PTR_ERR(page); in add_page_for_migration()
1542 if (IS_ERR(page)) in add_page_for_migration()
1546 if (!page) in add_page_for_migration()
1550 if (page_to_nid(page) == node) in add_page_for_migration()
1554 if (page_mapcount(page) > 1 && !migrate_all) in add_page_for_migration()
1557 if (PageHuge(page)) { in add_page_for_migration()
1558 if (PageHead(page)) { in add_page_for_migration()
1559 isolate_huge_page(page, pagelist); in add_page_for_migration()
1563 struct page *head; in add_page_for_migration()
1565 head = compound_head(page); in add_page_for_migration()
1582 put_page(page); in add_page_for_migration()
1691 struct page *page; in do_pages_stat_array() local
1699 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1701 err = PTR_ERR(page); in do_pages_stat_array()
1702 if (IS_ERR(page)) in do_pages_stat_array()
1705 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
1873 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
1877 struct page *newpage; in alloc_misplaced_dst_page()
1888 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
1892 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
1895 if (!migrate_balanced_pgdat(pgdat, compound_nr(page))) in numamigrate_isolate_page()
1898 if (isolate_lru_page(page)) in numamigrate_isolate_page()
1908 if (PageTransHuge(page) && page_count(page) != 3) { in numamigrate_isolate_page()
1909 putback_lru_page(page); in numamigrate_isolate_page()
1913 page_lru = page_is_file_cache(page); in numamigrate_isolate_page()
1914 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
1915 hpage_nr_pages(page)); in numamigrate_isolate_page()
1922 put_page(page); in numamigrate_isolate_page()
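
numamigrate_isolate_page() shows how best-effort the NUMA hinting-fault path is: give up silently if the target node is already short on memory, if LRU isolation fails, or if a THP carries unexpected references (exactly 3 are legal here: one for the mapcount, one for the caller's follow_page() pin, and one for the isolation itself). Reconstruction:

    static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
    {
        int page_lru;

        /* Don't migrate to a node that is itself under memory pressure. */
        if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
            return 0;
        if (isolate_lru_page(page))
            return 0;

        /* Expected THP count is 3: mapcount + caller pin + isolation;
         * anything more is a pin that forbids migration. */
        if (PageTransHuge(page) && page_count(page) != 3) {
            putback_lru_page(page);
            return 0;
        }

        page_lru = page_is_file_cache(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
                    hpage_nr_pages(page));

        /* Drop the follow_page() reference; isolation holds its own. */
        put_page(page);
        return 1;
    }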
1928 struct page *page = pmd_page(pmd); in pmd_trans_migrating() local
1929 return PageLocked(page); in pmd_trans_migrating()
1937 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
1949 if (page_mapcount(page) != 1 && page_is_file_cache(page) && in migrate_misplaced_page()
1957 if (page_is_file_cache(page) && PageDirty(page)) in migrate_misplaced_page()
1960 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
1964 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
1970 list_del(&page->lru); in migrate_misplaced_page()
1971 dec_node_page_state(page, NR_ISOLATED_ANON + in migrate_misplaced_page()
1972 page_is_file_cache(page)); in migrate_misplaced_page()
1973 putback_lru_page(page); in migrate_misplaced_page()
1982 put_page(page); in migrate_misplaced_page()
1996 struct page *page, int node) in migrate_misplaced_transhuge_page() argument
2001 struct page *new_page = NULL; in migrate_misplaced_transhuge_page()
2002 int page_lru = page_is_file_cache(page); in migrate_misplaced_transhuge_page()
2012 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_transhuge_page()
2020 if (PageSwapBacked(page)) in migrate_misplaced_transhuge_page()
2024 new_page->mapping = page->mapping; in migrate_misplaced_transhuge_page()
2025 new_page->index = page->index; in migrate_misplaced_transhuge_page()
2028 migrate_page_copy(new_page, page); in migrate_misplaced_transhuge_page()
2033 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { in migrate_misplaced_transhuge_page()
2038 SetPageActive(page); in migrate_misplaced_transhuge_page()
2040 SetPageUnevictable(page); in migrate_misplaced_transhuge_page()
2046 get_page(page); in migrate_misplaced_transhuge_page()
2047 putback_lru_page(page); in migrate_misplaced_transhuge_page()
2048 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2080 page_ref_unfreeze(page, 2); in migrate_misplaced_transhuge_page()
2081 mlock_migrate_page(new_page, page); in migrate_misplaced_transhuge_page()
2082 page_remove_rmap(page, true); in migrate_misplaced_transhuge_page()
2092 unlock_page(page); in migrate_misplaced_transhuge_page()
2093 put_page(page); /* Drop the rmap reference */ in migrate_misplaced_transhuge_page()
2094 put_page(page); /* Drop the LRU isolation reference */ in migrate_misplaced_transhuge_page()
2099 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2115 unlock_page(page); in migrate_misplaced_transhuge_page()
2116 put_page(page); in migrate_misplaced_transhuge_page()
2173 struct page *page; in migrate_vma_collect_pmd() local
2181 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
2182 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
2191 get_page(page); in migrate_vma_collect_pmd()
2193 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
2196 ret = split_huge_page(page); in migrate_vma_collect_pmd()
2197 unlock_page(page); in migrate_vma_collect_pmd()
2198 put_page(page); in migrate_vma_collect_pmd()
2216 struct page *page; in migrate_vma_collect_pmd() local
2240 page = device_private_entry_to_page(entry); in migrate_vma_collect_pmd()
2241 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
2252 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
2258 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
2272 get_page(page); in migrate_vma_collect_pmd()
2280 if (trylock_page(page)) { in migrate_vma_collect_pmd()
2287 entry = make_migration_entry(page, mpfn & in migrate_vma_collect_pmd()
2299 page_remove_rmap(page, false); in migrate_vma_collect_pmd()
2300 put_page(page); in migrate_vma_collect_pmd()
2356 static bool migrate_vma_check_page(struct page *page) in migrate_vma_check_page() argument
2370 if (PageCompound(page)) in migrate_vma_check_page()
2374 if (is_zone_device_page(page)) { in migrate_vma_check_page()
2388 return is_device_private_page(page); in migrate_vma_check_page()
2392 if (page_mapping(page)) in migrate_vma_check_page()
2393 extra += 1 + page_has_private(page); in migrate_vma_check_page()
2395 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
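
migrate_vma_check_page() is the pin detector for the device-migration path: after unmapping, the only legitimate references are the one the migrate_vma collection took, plus a page-cache reference and buffers for file pages, plus one reference per remaining mapping; device-private pages cannot be GUP-pinned at all, so they pass by definition. Reconstruction (the compound-page bail-out reflects that this path handles base pages only):

    static bool migrate_vma_check_page(struct page *page)
    {
        int extra = 1;      /* reference taken by migrate_vma collection */

        if (PageCompound(page))     /* THP is not handled on this path */
            return false;

        /* Device-private pages have no usable PTE, so GUP cannot pin
         * them; treat them as always migratable. */
        if (is_zone_device_page(page))
            return is_device_private_page(page);

        if (page_mapping(page))     /* file page: cache ref + buffers */
            extra += 1 + page_has_private(page);

        /* Any reference beyond those implied by the mapcount is a pin. */
        return (page_count(page) - extra) <= page_mapcount(page);
    }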
2420 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2423 if (!page) in migrate_vma_prepare()
2435 if (!trylock_page(page)) { in migrate_vma_prepare()
2438 put_page(page); in migrate_vma_prepare()
2446 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2447 if (!PageLRU(page) && allow_drain) { in migrate_vma_prepare()
2453 if (isolate_lru_page(page)) { in migrate_vma_prepare()
2460 unlock_page(page); in migrate_vma_prepare()
2462 put_page(page); in migrate_vma_prepare()
2468 put_page(page); in migrate_vma_prepare()
2471 if (!migrate_vma_check_page(page)) { in migrate_vma_prepare()
2477 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2478 get_page(page); in migrate_vma_prepare()
2479 putback_lru_page(page); in migrate_vma_prepare()
2483 unlock_page(page); in migrate_vma_prepare()
2486 if (!is_zone_device_page(page)) in migrate_vma_prepare()
2487 putback_lru_page(page); in migrate_vma_prepare()
2489 put_page(page); in migrate_vma_prepare()
2495 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2497 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_prepare()
2500 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2503 unlock_page(page); in migrate_vma_prepare()
2504 put_page(page); in migrate_vma_prepare()
2528 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2530 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2533 if (page_mapped(page)) { in migrate_vma_unmap()
2534 try_to_unmap(page, flags); in migrate_vma_unmap()
2535 if (page_mapped(page)) in migrate_vma_unmap()
2539 if (migrate_vma_check_page(page)) in migrate_vma_unmap()
2549 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2551 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2554 remove_migration_ptes(page, page, false); in migrate_vma_unmap()
2557 unlock_page(page); in migrate_vma_unmap()
2560 if (is_zone_device_page(page)) in migrate_vma_unmap()
2561 put_page(page); in migrate_vma_unmap()
2563 putback_lru_page(page); in migrate_vma_unmap()
2673 struct page *page, in migrate_vma_insert_page() argument
2726 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) in migrate_vma_insert_page()
2734 __SetPageUptodate(page); in migrate_vma_insert_page()
2736 if (is_zone_device_page(page)) { in migrate_vma_insert_page()
2737 if (is_device_private_page(page)) { in migrate_vma_insert_page()
2740 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2744 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2756 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2762 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2772 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2777 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2778 mem_cgroup_commit_charge(page, memcg, false, false); in migrate_vma_insert_page()
2779 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
2780 lru_cache_add_active_or_unevictable(page, vma); in migrate_vma_insert_page()
2781 get_page(page); in migrate_vma_insert_page()
2819 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
2820 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages() local
2829 if (!page) { in migrate_vma_pages()
2849 mapping = page_mapping(page); in migrate_vma_pages()
2871 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); in migrate_vma_pages()
2903 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
2904 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize() local
2906 if (!page) { in migrate_vma_finalize()
2919 newpage = page; in migrate_vma_finalize()
2922 remove_migration_ptes(page, newpage, false); in migrate_vma_finalize()
2923 unlock_page(page); in migrate_vma_finalize()
2926 if (is_zone_device_page(page)) in migrate_vma_finalize()
2927 put_page(page); in migrate_vma_finalize()
2929 putback_lru_page(page); in migrate_vma_finalize()
2931 if (newpage != page) { in migrate_vma_finalize()
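
migrate_vma_finalize() is the closing pass: point the PTEs at the new page if the copy succeeded, or back at the source on failure, unlock both pages, and drop the references, where ZONE_DEVICE pages are released with put_page() and ordinary pages return to the LRU. Sketch of the per-entry cleanup (reconstruction):

    for (i = 0; i < npages; i++) {
        struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
        struct page *page = migrate_pfn_to_page(migrate->src[i]);

        if (!page) {                    /* hole, or inserted-page case */
            if (newpage) {
                unlock_page(newpage);
                put_page(newpage);
            }
            continue;
        }
        if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
            if (newpage) {
                unlock_page(newpage);
                put_page(newpage);
            }
            newpage = page;             /* failed: restore to the source */
        }

        remove_migration_ptes(page, newpage, false);
        unlock_page(page);

        if (is_zone_device_page(page))
            put_page(page);
        else
            putback_lru_page(page);

        if (newpage != page) {
            unlock_page(newpage);
            if (is_zone_device_page(newpage))
                put_page(newpage);
            else
                putback_lru_page(newpage);
        }
    }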