Lines matching references to 'page' in the kernel's page-migration code (mm/migrate.c); each entry gives the source line number, the matching line, and the enclosing function. Two hedged driver-side sketches of how these paths are exercised follow the listing.
61 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
74 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
82 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
95 if (unlikely(!trylock_page(page))) in isolate_movable_page()
98 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
101 mapping = page_mapping(page); in isolate_movable_page()
102 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
104 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
108 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
109 __SetPageIsolated(page); in isolate_movable_page()
110 unlock_page(page); in isolate_movable_page()
115 unlock_page(page); in isolate_movable_page()
117 put_page(page); in isolate_movable_page()
122 static void putback_movable_page(struct page *page) in putback_movable_page() argument
126 mapping = page_mapping(page); in putback_movable_page()
127 mapping->a_ops->putback_page(page); in putback_movable_page()
128 __ClearPageIsolated(page); in putback_movable_page()
141 struct page *page; in putback_movable_pages() local
142 struct page *page2; in putback_movable_pages()
144 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
145 if (unlikely(PageHuge(page))) { in putback_movable_pages()
146 putback_active_hugepage(page); in putback_movable_pages()
149 list_del(&page->lru); in putback_movable_pages()
155 if (unlikely(__PageMovable(page))) { in putback_movable_pages()
156 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_pages()
157 lock_page(page); in putback_movable_pages()
158 if (PageMovable(page)) in putback_movable_pages()
159 putback_movable_page(page); in putback_movable_pages()
161 __ClearPageIsolated(page); in putback_movable_pages()
162 unlock_page(page); in putback_movable_pages()
163 put_page(page); in putback_movable_pages()
165 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in putback_movable_pages()
166 page_is_file_lru(page), -thp_nr_pages(page)); in putback_movable_pages()
167 putback_lru_page(page); in putback_movable_pages()
175 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, in remove_migration_pte() argument
179 .page = old, in remove_migration_pte()
184 struct page *new; in remove_migration_pte()
188 VM_BUG_ON_PAGE(PageTail(page), page); in remove_migration_pte()
190 if (PageKsm(page)) in remove_migration_pte()
191 new = page; in remove_migration_pte()
193 new = page - pvmw.page->index + in remove_migration_pte()
199 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in remove_migration_pte()
257 if (PageTransHuge(page) && PageMlocked(page)) in remove_migration_pte()
258 clear_page_mlock(page); in remove_migration_pte()
271 void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes()
294 struct page *page; in __migration_entry_wait() local
305 page = pfn_swap_entry_to_page(entry); in __migration_entry_wait()
306 page = compound_head(page); in __migration_entry_wait()
313 if (!get_page_unless_zero(page)) in __migration_entry_wait()
316 put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); in __migration_entry_wait()
341 struct page *page; in pmd_migration_entry_wait() local
346 page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
347 if (!get_page_unless_zero(page)) in pmd_migration_entry_wait()
350 put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE); in pmd_migration_entry_wait()
357 static int expected_page_refs(struct address_space *mapping, struct page *page) in expected_page_refs() argument
365 expected_count += is_device_private_page(page); in expected_page_refs()
367 expected_count += thp_nr_pages(page) + page_has_private(page); in expected_page_refs()
381 struct page *newpage, struct page *page, int extra_count) in migrate_page_move_mapping() argument
383 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_page_move_mapping()
386 int expected_count = expected_page_refs(mapping, page) + extra_count; in migrate_page_move_mapping()
387 int nr = thp_nr_pages(page); in migrate_page_move_mapping()
391 if (page_count(page) != expected_count) in migrate_page_move_mapping()
395 newpage->index = page->index; in migrate_page_move_mapping()
396 newpage->mapping = page->mapping; in migrate_page_move_mapping()
397 if (PageSwapBacked(page)) in migrate_page_move_mapping()
403 oldzone = page_zone(page); in migrate_page_move_mapping()
407 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_page_move_mapping()
412 if (!page_ref_freeze(page, expected_count)) { in migrate_page_move_mapping()
421 newpage->index = page->index; in migrate_page_move_mapping()
422 newpage->mapping = page->mapping; in migrate_page_move_mapping()
424 if (PageSwapBacked(page)) { in migrate_page_move_mapping()
426 if (PageSwapCache(page)) { in migrate_page_move_mapping()
428 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
431 VM_BUG_ON_PAGE(PageSwapCache(page), page); in migrate_page_move_mapping()
435 dirty = PageDirty(page); in migrate_page_move_mapping()
437 ClearPageDirty(page); in migrate_page_move_mapping()
442 if (PageTransHuge(page)) { in migrate_page_move_mapping()
456 page_ref_unfreeze(page, expected_count - nr); in migrate_page_move_mapping()
475 memcg = page_memcg(page); in migrate_page_move_mapping()
481 if (PageSwapBacked(page) && !PageSwapCache(page)) { in migrate_page_move_mapping()
486 if (PageSwapCache(page)) { in migrate_page_move_mapping()
509 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
511 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
515 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
516 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_huge_page_move_mapping()
521 if (!page_ref_freeze(page, expected_count)) { in migrate_huge_page_move_mapping()
526 newpage->index = page->index; in migrate_huge_page_move_mapping()
527 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
533 page_ref_unfreeze(page, expected_count - 1); in migrate_huge_page_move_mapping()
543 void migrate_page_states(struct page *newpage, struct page *page) in migrate_page_states() argument
547 if (PageError(page)) in migrate_page_states()
549 if (PageReferenced(page)) in migrate_page_states()
551 if (PageUptodate(page)) in migrate_page_states()
553 if (TestClearPageActive(page)) { in migrate_page_states()
554 VM_BUG_ON_PAGE(PageUnevictable(page), page); in migrate_page_states()
556 } else if (TestClearPageUnevictable(page)) in migrate_page_states()
558 if (PageWorkingset(page)) in migrate_page_states()
560 if (PageChecked(page)) in migrate_page_states()
562 if (PageMappedToDisk(page)) in migrate_page_states()
566 if (PageDirty(page)) in migrate_page_states()
569 if (page_is_young(page)) in migrate_page_states()
571 if (page_is_idle(page)) in migrate_page_states()
578 cpupid = page_cpupid_xchg_last(page, -1); in migrate_page_states()
581 ksm_migrate_page(newpage, page); in migrate_page_states()
586 if (PageSwapCache(page)) in migrate_page_states()
587 ClearPageSwapCache(page); in migrate_page_states()
588 ClearPagePrivate(page); in migrate_page_states()
591 if (!PageHuge(page)) in migrate_page_states()
592 set_page_private(page, 0); in migrate_page_states()
606 if (PageReadahead(page)) in migrate_page_states()
609 copy_page_owner(page, newpage); in migrate_page_states()
611 if (!PageHuge(page)) in migrate_page_states()
612 mem_cgroup_migrate(page, newpage); in migrate_page_states()
616 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
618 if (PageHuge(page) || PageTransHuge(page)) in migrate_page_copy()
619 copy_huge_page(newpage, page); in migrate_page_copy()
621 copy_highpage(newpage, page); in migrate_page_copy()
623 migrate_page_states(newpage, page); in migrate_page_copy()
638 struct page *newpage, struct page *page, in migrate_page() argument
643 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ in migrate_page()
645 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in migrate_page()
651 migrate_page_copy(newpage, page); in migrate_page()
653 migrate_page_states(newpage, page); in migrate_page()
698 struct page *newpage, struct page *page, enum migrate_mode mode, in __buffer_migrate_page() argument
705 if (!page_has_buffers(page)) in __buffer_migrate_page()
706 return migrate_page(mapping, newpage, page, mode); in __buffer_migrate_page()
709 expected_count = expected_page_refs(mapping, page); in __buffer_migrate_page()
710 if (page_count(page) != expected_count) in __buffer_migrate_page()
713 head = page_buffers(page); in __buffer_migrate_page()
744 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in __buffer_migrate_page()
748 attach_page_private(newpage, detach_page_private(page)); in __buffer_migrate_page()
758 migrate_page_copy(newpage, page); in __buffer_migrate_page()
760 migrate_page_states(newpage, page); in __buffer_migrate_page()
782 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
784 return __buffer_migrate_page(mapping, newpage, page, mode, false); in buffer_migrate_page()
795 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page_norefs() argument
797 return __buffer_migrate_page(mapping, newpage, page, mode, true); in buffer_migrate_page_norefs()
804 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
819 if (!clear_page_dirty_for_io(page)) in writeout()
831 remove_migration_ptes(page, page, false); in writeout()
833 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
837 lock_page(page); in writeout()
846 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
848 if (PageDirty(page)) { in fallback_migrate_page()
857 return writeout(mapping, page); in fallback_migrate_page()
864 if (page_has_private(page) && in fallback_migrate_page()
865 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
868 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
882 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
887 bool is_lru = !__PageMovable(page); in move_to_new_page()
889 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
892 mapping = page_mapping(page); in move_to_new_page()
896 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
906 page, mode); in move_to_new_page()
909 page, mode); in move_to_new_page()
915 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
916 if (!PageMovable(page)) { in move_to_new_page()
918 __ClearPageIsolated(page); in move_to_new_page()
923 page, mode); in move_to_new_page()
925 !PageIsolated(page)); in move_to_new_page()
933 if (__PageMovable(page)) { in move_to_new_page()
934 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
940 __ClearPageIsolated(page); in move_to_new_page()
948 if (!PageMappingFlags(page)) in move_to_new_page()
949 page->mapping = NULL; in move_to_new_page()
959 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
965 bool is_lru = !__PageMovable(page); in __unmap_and_move()
967 if (!trylock_page(page)) { in __unmap_and_move()
987 lock_page(page); in __unmap_and_move()
990 if (PageWriteback(page)) { in __unmap_and_move()
1007 wait_on_page_writeback(page); in __unmap_and_move()
1024 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
1025 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
1039 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1055 if (!page->mapping) { in __unmap_and_move()
1056 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
1057 if (page_has_private(page)) { in __unmap_and_move()
1058 try_to_free_buffers(page); in __unmap_and_move()
1061 } else if (page_mapped(page)) { in __unmap_and_move()
1063 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
1064 page); in __unmap_and_move()
1065 try_to_migrate(page, 0); in __unmap_and_move()
1069 if (!page_mapped(page)) in __unmap_and_move()
1070 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1073 remove_migration_ptes(page, in __unmap_and_move()
1074 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); in __unmap_and_move()
1082 unlock_page(page); in __unmap_and_move()
1183 unsigned long private, struct page *page, in unmap_and_move() argument
1189 struct page *newpage = NULL; in unmap_and_move()
1191 if (!thp_migration_supported() && PageTransHuge(page)) in unmap_and_move()
1194 if (page_count(page) == 1) { in unmap_and_move()
1196 ClearPageActive(page); in unmap_and_move()
1197 ClearPageUnevictable(page); in unmap_and_move()
1198 if (unlikely(__PageMovable(page))) { in unmap_and_move()
1199 lock_page(page); in unmap_and_move()
1200 if (!PageMovable(page)) in unmap_and_move()
1201 __ClearPageIsolated(page); in unmap_and_move()
1202 unlock_page(page); in unmap_and_move()
1207 newpage = get_new_page(page, private); in unmap_and_move()
1211 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
1222 list_del(&page->lru); in unmap_and_move()
1236 if (likely(!__PageMovable(page))) in unmap_and_move()
1237 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in unmap_and_move()
1238 page_is_file_lru(page), -thp_nr_pages(page)); in unmap_and_move()
1244 put_page(page); in unmap_and_move()
1247 list_add_tail(&page->lru, ret); in unmap_and_move()
1278 struct page *hpage, int force, in unmap_and_move_huge_page()
1284 struct page *new_hpage; in unmap_and_move_huge_page()
1405 static inline int try_split_thp(struct page *page, struct page **page2, in try_split_thp() argument
1410 lock_page(page); in try_split_thp()
1411 rc = split_huge_page_to_list(page, from); in try_split_thp()
1412 unlock_page(page); in try_split_thp()
1414 list_safe_reset_next(page, *page2, lru); in try_split_thp()
1455 struct page *page; in migrate_pages() local
1456 struct page *page2; in migrate_pages()
1471 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1478 is_thp = PageTransHuge(page) && !PageHuge(page); in migrate_pages()
1479 nr_subpages = thp_nr_pages(page); in migrate_pages()
1482 if (PageHuge(page)) in migrate_pages()
1484 put_new_page, private, page, in migrate_pages()
1489 private, page, pass > 2, mode, in migrate_pages()
1515 if (!try_split_thp(page, &page2, from)) { in migrate_pages()
1535 if (!try_split_thp(page, &page2, from)) { in migrate_pages()
1605 struct page *alloc_migration_target(struct page *page, unsigned long private) in alloc_migration_target() argument
1610 struct page *new_page = NULL; in alloc_migration_target()
1618 nid = page_to_nid(page); in alloc_migration_target()
1620 if (PageHuge(page)) { in alloc_migration_target()
1621 struct hstate *h = page_hstate(compound_head(page)); in alloc_migration_target()
1627 if (PageTransHuge(page)) { in alloc_migration_target()
1636 zidx = zone_idx(page_zone(page)); in alloc_migration_target()
1690 struct page *page; in add_page_for_migration() local
1702 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1704 err = PTR_ERR(page); in add_page_for_migration()
1705 if (IS_ERR(page)) in add_page_for_migration()
1709 if (!page) in add_page_for_migration()
1713 if (page_to_nid(page) == node) in add_page_for_migration()
1717 if (page_mapcount(page) > 1 && !migrate_all) in add_page_for_migration()
1720 if (PageHuge(page)) { in add_page_for_migration()
1721 if (PageHead(page)) { in add_page_for_migration()
1722 isolate_huge_page(page, pagelist); in add_page_for_migration()
1726 struct page *head; in add_page_for_migration()
1728 head = compound_head(page); in add_page_for_migration()
1745 put_page(page); in add_page_for_migration()
1878 struct page *page; in do_pages_stat_array() local
1886 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1888 err = PTR_ERR(page); in do_pages_stat_array()
1889 if (IS_ERR(page)) in do_pages_stat_array()
1892 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
2077 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
2081 struct page *newpage; in alloc_misplaced_dst_page()
2092 static struct page *alloc_misplaced_dst_page_thp(struct page *page, in alloc_misplaced_dst_page_thp() argument
2096 struct page *newpage; in alloc_misplaced_dst_page_thp()
2109 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
2112 int nr_pages = thp_nr_pages(page); in numamigrate_isolate_page()
2114 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
2117 if (PageTransHuge(page) && total_mapcount(page) > 1) in numamigrate_isolate_page()
2124 if (isolate_lru_page(page)) in numamigrate_isolate_page()
2127 page_lru = page_is_file_lru(page); in numamigrate_isolate_page()
2128 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
2136 put_page(page); in numamigrate_isolate_page()
2145 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
2154 int nr_pages = thp_nr_pages(page); in migrate_misplaced_page()
2161 compound = PageTransHuge(page); in migrate_misplaced_page()
2172 if (page_mapcount(page) != 1 && page_is_file_lru(page) && in migrate_misplaced_page()
2180 if (page_is_file_lru(page) && PageDirty(page)) in migrate_misplaced_page()
2183 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
2187 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
2192 list_del(&page->lru); in migrate_misplaced_page()
2193 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in migrate_misplaced_page()
2194 page_is_file_lru(page), -nr_pages); in migrate_misplaced_page()
2195 putback_lru_page(page); in migrate_misplaced_page()
2204 put_page(page); in migrate_misplaced_page()
2265 struct page *page; in migrate_vma_collect_pmd() local
2273 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
2274 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
2283 get_page(page); in migrate_vma_collect_pmd()
2285 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
2288 ret = split_huge_page(page); in migrate_vma_collect_pmd()
2289 unlock_page(page); in migrate_vma_collect_pmd()
2290 put_page(page); in migrate_vma_collect_pmd()
2308 struct page *page; in migrate_vma_collect_pmd() local
2332 page = pfn_swap_entry_to_page(entry); in migrate_vma_collect_pmd()
2335 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
2338 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
2351 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
2357 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
2371 get_page(page); in migrate_vma_collect_pmd()
2379 if (trylock_page(page)) { in migrate_vma_collect_pmd()
2388 page_to_pfn(page)); in migrate_vma_collect_pmd()
2391 page_to_pfn(page)); in migrate_vma_collect_pmd()
2411 page_remove_rmap(page, false); in migrate_vma_collect_pmd()
2412 put_page(page); in migrate_vma_collect_pmd()
2474 static bool migrate_vma_check_page(struct page *page) in migrate_vma_check_page() argument
2488 if (PageCompound(page)) in migrate_vma_check_page()
2492 if (is_zone_device_page(page)) { in migrate_vma_check_page()
2506 return is_device_private_page(page); in migrate_vma_check_page()
2510 if (page_mapping(page)) in migrate_vma_check_page()
2511 extra += 1 + page_has_private(page); in migrate_vma_check_page()
2513 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
2538 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2541 if (!page) in migrate_vma_prepare()
2553 if (!trylock_page(page)) { in migrate_vma_prepare()
2556 put_page(page); in migrate_vma_prepare()
2564 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2565 if (!PageLRU(page) && allow_drain) { in migrate_vma_prepare()
2571 if (isolate_lru_page(page)) { in migrate_vma_prepare()
2578 unlock_page(page); in migrate_vma_prepare()
2580 put_page(page); in migrate_vma_prepare()
2586 put_page(page); in migrate_vma_prepare()
2589 if (!migrate_vma_check_page(page)) { in migrate_vma_prepare()
2595 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2596 get_page(page); in migrate_vma_prepare()
2597 putback_lru_page(page); in migrate_vma_prepare()
2601 unlock_page(page); in migrate_vma_prepare()
2604 if (!is_zone_device_page(page)) in migrate_vma_prepare()
2605 putback_lru_page(page); in migrate_vma_prepare()
2607 put_page(page); in migrate_vma_prepare()
2613 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2615 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_prepare()
2618 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2621 unlock_page(page); in migrate_vma_prepare()
2622 put_page(page); in migrate_vma_prepare()
2645 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2647 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2650 if (page_mapped(page)) { in migrate_vma_unmap()
2651 try_to_migrate(page, 0); in migrate_vma_unmap()
2652 if (page_mapped(page)) in migrate_vma_unmap()
2656 if (migrate_vma_check_page(page)) in migrate_vma_unmap()
2666 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2668 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2671 remove_migration_ptes(page, page, false); in migrate_vma_unmap()
2674 unlock_page(page); in migrate_vma_unmap()
2677 if (is_zone_device_page(page)) in migrate_vma_unmap()
2678 put_page(page); in migrate_vma_unmap()
2680 putback_lru_page(page); in migrate_vma_unmap()
2798 struct page *page, in migrate_vma_insert_page() argument
2849 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
2857 __SetPageUptodate(page); in migrate_vma_insert_page()
2859 if (is_zone_device_page(page)) { in migrate_vma_insert_page()
2860 if (is_device_private_page(page)) { in migrate_vma_insert_page()
2865 page_to_pfn(page)); in migrate_vma_insert_page()
2868 page_to_pfn(page)); in migrate_vma_insert_page()
2879 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2906 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2907 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
2908 lru_cache_add_inactive_or_unevictable(page, vma); in migrate_vma_insert_page()
2909 get_page(page); in migrate_vma_insert_page()
2949 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
2950 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages() local
2959 if (!page) { in migrate_vma_pages()
2976 mapping = page_mapping(page); in migrate_vma_pages()
2998 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); in migrate_vma_pages()
3030 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
3031 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize() local
3033 if (!page) { in migrate_vma_finalize()
3046 newpage = page; in migrate_vma_finalize()
3049 remove_migration_ptes(page, newpage, false); in migrate_vma_finalize()
3050 unlock_page(page); in migrate_vma_finalize()
3052 if (is_zone_device_page(page)) in migrate_vma_finalize()
3053 put_page(page); in migrate_vma_finalize()
3055 putback_lru_page(page); in migrate_vma_finalize()
3057 if (newpage != page) { in migrate_vma_finalize()
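
The isolate_movable_page(), putback_movable_page() and move_to_new_page() entries near the top of the listing dispatch into a driver's address_space_operations (a_ops->isolate_page, a_ops->putback_page, a_ops->migratepage). As a hedged illustration only, a non-LRU movable-page driver of this kernel vintage wires up roughly the following three callbacks; the my_* names are invented for this sketch, while the hook names, helpers, and return values are the real ones the listing calls into. The core marks the page isolated and clears page->mapping itself, so the driver only detaches, re-attaches, or copies its own payload.

	/*
	 * Hedged sketch of the driver-side callbacks invoked by the listing above.
	 * All my_* identifiers are hypothetical.
	 */
	#include <linux/fs.h>
	#include <linux/highmem.h>
	#include <linux/migrate.h>
	#include <linux/mm.h>

	static bool my_isolate_page(struct page *page, isolate_mode_t mode)
	{
		/*
		 * Reached from isolate_movable_page() with the page locked and a
		 * reference held; detach the page from driver-private structures
		 * and return true, or return false to refuse isolation.
		 */
		return true;
	}

	static void my_putback_page(struct page *page)
	{
		/*
		 * Reached from putback_movable_page() when migration fails or is
		 * aborted; re-insert the page into the driver's structures.
		 */
	}

	static int my_migratepage(struct address_space *mapping,
				  struct page *newpage, struct page *page,
				  enum migrate_mode mode)
	{
		/*
		 * Reached from move_to_new_page(); copy the payload, repoint any
		 * driver metadata at newpage, and keep newpage movable.
		 */
		copy_highpage(newpage, page);
		__SetPageMovable(newpage, mapping);
		return MIGRATEPAGE_SUCCESS;
	}

	static const struct address_space_operations my_movable_aops = {
		.isolate_page	= my_isolate_page,
		.putback_page	= my_putback_page,
		.migratepage	= my_migratepage,
	};

Pages enter this path once the driver points page->mapping at a mapping whose a_ops are these callbacks and calls __SetPageMovable(page, mapping); that is the state __PageMovable()/PageMovable() test throughout the listing.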
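The migrate_vma_collect_pmd(), migrate_vma_prepare(), migrate_vma_unmap(), migrate_vma_pages() and migrate_vma_finalize() entries at the end of the listing are driven by device drivers through the migrate_vma API. Below is only a rough, hedged sketch of that calling sequence; the demo_* function is invented for illustration, and a real driver would allocate device-private memory and copy or DMA the data before installing the destination entries.

	/*
	 * Hedged sketch of a driver driving the migrate_vma_* machinery above.
	 * demo_migrate_range() is hypothetical.
	 */
	#include <linux/migrate.h>
	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/slab.h>

	static int demo_migrate_range(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end,
				      void *owner)
	{
		unsigned long npages = (end - start) >> PAGE_SHIFT;
		struct migrate_vma args = {
			.vma		= vma,
			.start		= start,
			.end		= end,
			.pgmap_owner	= owner,
			.flags		= MIGRATE_VMA_SELECT_SYSTEM,
		};
		unsigned long *src, *dst;
		unsigned long i;
		int ret = -ENOMEM;

		src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
		dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
		if (!src || !dst)
			goto out;
		args.src = src;
		args.dst = dst;

		/*
		 * Collects, isolates and unmaps the source pages
		 * (migrate_vma_collect_pmd(), migrate_vma_prepare(),
		 * migrate_vma_unmap() in the listing).
		 */
		ret = migrate_vma_setup(&args);
		if (ret)
			goto out;

		for (i = 0; i < npages; i++) {
			struct page *dpage;

			if (!(src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			/*
			 * A real driver allocates device memory here and copies
			 * the data; the destination page must be locked. Kernels
			 * of this vintage also OR in MIGRATE_PFN_LOCKED here.
			 */
			dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
			if (!dpage)
				continue;
			lock_page(dpage);
			dst[i] = migrate_pfn(page_to_pfn(dpage));
		}

		/*
		 * Installs the new pages (migrate_vma_pages()), then drops the old
		 * ones and restores whatever could not be migrated
		 * (migrate_vma_finalize()).
		 */
		migrate_vma_pages(&args);
		migrate_vma_finalize(&args);
	out:
		kfree(src);
		kfree(dst);
		return ret;
	}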