Lines Matching refs:page

74 struct page *page; in migrate_vma_collect_pmd() local
82 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
83 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
89 get_page(page); in migrate_vma_collect_pmd()
91 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
94 ret = split_huge_page(page); in migrate_vma_collect_pmd()
95 unlock_page(page); in migrate_vma_collect_pmd()
96 put_page(page); in migrate_vma_collect_pmd()
110 struct page *page; in migrate_vma_collect_pmd() local
134 page = pfn_swap_entry_to_page(entry); in migrate_vma_collect_pmd()
137 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
140 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
152 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
153 if (page && !is_zone_device_page(page) && in migrate_vma_collect_pmd()
156 else if (page && is_device_coherent_page(page) && in migrate_vma_collect_pmd()
158 page->pgmap->owner != migrate->pgmap_owner)) in migrate_vma_collect_pmd()
165 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
179 get_page(page); in migrate_vma_collect_pmd()
193 if (trylock_page(page)) { in migrate_vma_collect_pmd()
198 anon_exclusive = PageAnon(page) && PageAnonExclusive(page); in migrate_vma_collect_pmd()
202 if (page_try_share_anon_rmap(page)) { in migrate_vma_collect_pmd()
204 unlock_page(page); in migrate_vma_collect_pmd()
205 put_page(page); in migrate_vma_collect_pmd()
217 folio_mark_dirty(page_folio(page)); in migrate_vma_collect_pmd()
222 page_to_pfn(page)); in migrate_vma_collect_pmd()
225 page_to_pfn(page)); in migrate_vma_collect_pmd()
228 page_to_pfn(page)); in migrate_vma_collect_pmd()
254 page_remove_rmap(page, vma, false); in migrate_vma_collect_pmd()
255 put_page(page); in migrate_vma_collect_pmd()
260 put_page(page); in migrate_vma_collect_pmd()
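
The references above (lines 74-260) are the collect step: migrate_vma_collect_pmd(), one of the device-migration helpers in mm/migrate_device.c, walks the PTEs under one PMD and records each migratable page in the src array as an encoded "mpfn", the page frame number shifted above a few flag bits such as MIGRATE_PFN_VALID, MIGRATE_PFN_MIGRATE and MIGRATE_PFN_WRITE. A minimal userspace model of that encoding; the constants are taken from include/linux/migrate.h and are an assumption tied to this particular kernel version:

    #include <assert.h>
    #include <stdio.h>

    /* Flag layout assumed from include/linux/migrate.h for this kernel version. */
    #define MIGRATE_PFN_VALID   (1UL << 0)
    #define MIGRATE_PFN_MIGRATE (1UL << 1)
    #define MIGRATE_PFN_WRITE   (1UL << 3)
    #define MIGRATE_PFN_SHIFT   6

    /* Mirrors migrate_pfn(): pack the pfn above the flag bits and mark the slot valid. */
    static unsigned long model_migrate_pfn(unsigned long pfn)
    {
        return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
    }

    /* Decode half of migrate_pfn_to_page(): recover the pfn, or report "no page". */
    static int model_mpfn_to_pfn(unsigned long mpfn, unsigned long *pfn)
    {
        if (!(mpfn & MIGRATE_PFN_VALID))
            return -1;                    /* corresponds to migrate_pfn_to_page() == NULL */
        *pfn = mpfn >> MIGRATE_PFN_SHIFT;
        return 0;
    }

    int main(void)
    {
        unsigned long mpfn = model_migrate_pfn(0x1234) | MIGRATE_PFN_MIGRATE | MIGRATE_PFN_WRITE;
        unsigned long pfn;

        assert(model_mpfn_to_pfn(mpfn, &pfn) == 0 && pfn == 0x1234);
        printf("mpfn=%#lx migrate=%d write=%d\n", mpfn,
               !!(mpfn & MIGRATE_PFN_MIGRATE), !!(mpfn & MIGRATE_PFN_WRITE));
        return 0;
    }
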
322 static bool migrate_vma_check_page(struct page *page, struct page *fault_page) in migrate_vma_check_page() argument
329 int extra = 1 + (page == fault_page); in migrate_vma_check_page()
336 if (PageCompound(page)) in migrate_vma_check_page()
340 if (is_zone_device_page(page)) in migrate_vma_check_page()
344 if (page_mapping(page)) in migrate_vma_check_page()
345 extra += 1 + page_has_private(page); in migrate_vma_check_page()
347 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
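
migrate_vma_check_page() (lines 322-347 above) decides whether anything other than the migration code itself still holds the page: it adds up the references it can account for (its own, the fault handler's, the extra ZONE_DEVICE reference, the mapping plus any private data) and refuses to migrate if more remain than the map count explains. A self-contained sketch of that arithmetic; a model of the check, not the kernel function:

    #include <stdbool.h>

    /*
     * Model of the check on lines 329-347 above: the page is only migratable if
     * every reference left over is explained by a page-table mapping.
     */
    static bool model_check_page(int refcount, int mapcount,
                                 bool is_fault_page, bool is_compound,
                                 bool is_zone_device, bool has_mapping, bool has_private)
    {
        int extra = 1 + (is_fault_page ? 1 : 0); /* our reference, plus the fault handler's */

        if (is_compound)                         /* THP is not handled on this path */
            return false;

        if (is_zone_device)                      /* ZONE_DEVICE pages carry one extra reference */
            extra++;

        if (has_mapping)                         /* file-backed: address_space plus private data */
            extra += 1 + (has_private ? 1 : 0);

        return (refcount - extra) <= mapcount;   /* anything unaccounted for blocks migration */
    }
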
359 struct page *fault_page) in migrate_device_unmap()
368 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_unmap() local
371 if (!page) { in migrate_device_unmap()
378 if (!is_zone_device_page(page)) { in migrate_device_unmap()
379 if (!PageLRU(page) && allow_drain) { in migrate_device_unmap()
385 if (!isolate_lru_page(page)) { in migrate_device_unmap()
392 put_page(page); in migrate_device_unmap()
395 folio = page_folio(page); in migrate_device_unmap()
399 if (page_mapped(page) || in migrate_device_unmap()
400 !migrate_vma_check_page(page, fault_page)) { in migrate_device_unmap()
401 if (!is_zone_device_page(page)) { in migrate_device_unmap()
402 get_page(page); in migrate_device_unmap()
403 putback_lru_page(page); in migrate_device_unmap()
415 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_unmap() local
418 if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE)) in migrate_device_unmap()
421 folio = page_folio(page); in migrate_device_unmap()
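
migrate_device_unmap() (lines 359-421 above) is a mark-and-restore pass over the src array: entries whose pages cannot be isolated from the LRU, or that still have unexplained references after try_to_migrate(), get their MIGRATE_PFN_MIGRATE bit cleared, and a second pass then restores exactly those entries. A small model of the two-pass bookkeeping; can_unmap() is an illustrative stand-in for the isolation and refcount checks:

    #include <stdbool.h>
    #include <stddef.h>

    #define MIGRATE_PFN_VALID   (1UL << 0)
    #define MIGRATE_PFN_MIGRATE (1UL << 1)

    /* Illustrative stand-in for "isolated, unmapped and refcount-clean" (lines 385-400). */
    static bool can_unmap(size_t i)
    {
        return (i % 3) != 0;
    }

    static unsigned long model_device_unmap(unsigned long *src_pfns, size_t npages)
    {
        unsigned long unmapped = 0, restore = 0;
        size_t i;

        /* Pass 1: clear MIGRATE on every entry that failed, count the successes. */
        for (i = 0; i < npages; i++) {
            if (!(src_pfns[i] & MIGRATE_PFN_VALID)) {
                /* A hole: nothing to unmap, but it stays a migration candidate. */
                if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
                    unmapped++;
                continue;
            }
            if (!can_unmap(i)) {
                src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                restore++;
                continue;
            }
            unmapped++;
        }

        /* Pass 2: only entries that are valid but no longer marked need restoring. */
        for (i = 0; i < npages && restore; i++) {
            if (!(src_pfns[i] & MIGRATE_PFN_VALID) || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
                continue;
            src_pfns[i] = 0;   /* kernel: remove_migration_ptes(), then drop the entry */
            restore--;
        }

        return unmapped;
    }
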
564 struct page *page, in migrate_vma_insert_page() argument
599 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
607 __SetPageUptodate(page); in migrate_vma_insert_page()
609 if (is_device_private_page(page)) { in migrate_vma_insert_page()
614 page_to_pfn(page)); in migrate_vma_insert_page()
617 page_to_pfn(page)); in migrate_vma_insert_page()
620 if (is_zone_device_page(page) && in migrate_vma_insert_page()
621 !is_device_coherent_page(page)) { in migrate_vma_insert_page()
625 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
655 page_add_new_anon_rmap(page, vma, addr); in migrate_vma_insert_page()
656 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
657 lru_cache_add_inactive_or_unevictable(page, vma); in migrate_vma_insert_page()
658 get_page(page); in migrate_vma_insert_page()
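
migrate_vma_insert_page() (lines 564-658 above) covers the case where the destination PTE was empty: a device-private destination page gets a device-private swap entry (readable or writable depending on the VMA), other ZONE_DEVICE pages that are not device-coherent abort the insert, and everything else gets a normal present PTE built with mk_pte(). A compact model of just that decision, using an illustrative enum rather than kernel types:

    #include <stdbool.h>

    enum dst_pte_kind {
        DST_PTE_PRESENT,              /* mk_pte() on system or device-coherent memory */
        DST_PTE_DEVICE_PRIVATE_READ,  /* make_readable_device_private_entry() */
        DST_PTE_DEVICE_PRIVATE_WRITE, /* make_writable_device_private_entry() */
        DST_PTE_UNSUPPORTED,          /* any other ZONE_DEVICE memory: abort the insert */
    };

    static enum dst_pte_kind model_dst_pte(bool device_private, bool device_coherent,
                                           bool zone_device, bool vma_writable)
    {
        if (device_private)
            return vma_writable ? DST_PTE_DEVICE_PRIVATE_WRITE
                                : DST_PTE_DEVICE_PRIVATE_READ;
        if (zone_device && !device_coherent)
            return DST_PTE_UNSUPPORTED;
        return DST_PTE_PRESENT;
    }
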
690 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); in __migrate_device_pages()
691 struct page *page = migrate_pfn_to_page(src_pfns[i]); in __migrate_device_pages() local
700 if (!page) { in __migrate_device_pages()
727 mapping = page_mapping(page); in __migrate_device_pages()
734 folio = page_folio(page); in __migrate_device_pages()
756 if (migrate && migrate->fault_page == page) in __migrate_device_pages()
758 page_folio(page), in __migrate_device_pages()
762 page_folio(page), MIGRATE_SYNC_NO_COPY); in __migrate_device_pages()
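
__migrate_device_pages() (lines 690-762 above) is the core of migrate_vma_pages(): for every src/dst pair still marked MIGRATE_PFN_MIGRATE it moves the page state over with migrate_folio() (migrate_folio_extra() for the faulting page), and the MIGRATE_SYNC_NO_COPY mode means the data copy itself is the caller's job. From a driver's point of view the helpers listed in this file compose roughly as in the sketch below, modeled on the pattern in Documentation/mm/hmm.rst; it is not compilable on its own and the my_drv_* helpers are hypothetical:

    /*
     * Driver-side sketch of migrate_vma_setup()/migrate_vma_pages()/
     * migrate_vma_finalize(), after the pattern in Documentation/mm/hmm.rst.
     * Needs <linux/migrate.h> and a real driver; my_drv_alloc_device_page()
     * and my_drv_copy_to_device() are hypothetical, the former assumed to
     * return a locked ZONE_DEVICE page (as zone_device_page_init() hands out).
     */
    static int my_drv_migrate_to_device(struct vm_area_struct *vma,
                                        unsigned long start, unsigned long end,
                                        void *drv_private)
    {
        /* src/dst must cover (end - start) >> PAGE_SHIFT entries; <= 64 pages assumed. */
        unsigned long src[64] = { 0 }, dst[64] = { 0 };
        struct migrate_vma args = {
            .vma         = vma,
            .start       = start,
            .end         = end,
            .src         = src,
            .dst         = dst,
            .pgmap_owner = drv_private,
            .flags       = MIGRATE_VMA_SELECT_SYSTEM,
        };
        unsigned long i;
        int ret;

        ret = migrate_vma_setup(&args);        /* collect + unmap, i.e. the code above */
        if (ret)
            return ret;

        for (i = 0; i < args.npages; i++) {
            struct page *spage = migrate_pfn_to_page(args.src[i]);
            struct page *dpage;

            if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
                continue;                      /* this page could not be unmapped; skip it */
            dpage = my_drv_alloc_device_page(drv_private);
            if (!dpage)
                continue;
            if (spage)
                my_drv_copy_to_device(dpage, spage);   /* the driver does the data copy */
            args.dst[i] = migrate_pfn(page_to_pfn(dpage));
        }

        migrate_vma_pages(&args);              /* __migrate_device_pages() runs here */
        migrate_vma_finalize(&args);
        return 0;
    }
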
818 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); in migrate_device_finalize()
819 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_finalize() local
821 if (!page) { in migrate_device_finalize()
834 newpage = page; in migrate_device_finalize()
837 src = page_folio(page); in migrate_device_finalize()
842 if (is_zone_device_page(page)) in migrate_device_finalize()
843 put_page(page); in migrate_device_finalize()
845 putback_lru_page(page); in migrate_device_finalize()
847 if (newpage != page) { in migrate_device_finalize()
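
migrate_device_finalize() (lines 818-847 above) picks, per entry, which page the parked migration PTEs are pointed back at: if MIGRATE_PFN_MIGRATE was cleared along the way or no destination page was supplied, the original page is restored; otherwise the PTEs are remapped to the new page and the source is released (putback_lru_page() unless it is a ZONE_DEVICE page). A tiny model of that choice:

    #include <stdbool.h>

    #define MIGRATE_PFN_MIGRATE (1UL << 1)

    /*
     * True when the parked migration PTEs should point at the destination page,
     * false when the original page has to be restored (lines 819-847 above).
     */
    static bool model_remap_to_dst(unsigned long src_pfn, bool have_dst_page)
    {
        if (!(src_pfn & MIGRATE_PFN_MIGRATE))
            return false;    /* migration failed somewhere earlier: restore the source */
        if (!have_dst_page)
            return false;    /* no destination page was supplied: restore the source */
        return true;
    }
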
900 struct page *page = pfn_to_page(pfn); in migrate_device_range() local
902 if (!get_page_unless_zero(page)) { in migrate_device_range()
907 if (!trylock_page(page)) { in migrate_device_range()
909 put_page(page); in migrate_device_range()
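
migrate_device_range() (lines 900-909 above) builds the src array straight from a physical pfn range instead of walking page tables: a pfn is only marked migratable if a reference can be taken (get_page_unless_zero()) and the page lock acquired without blocking; otherwise the slot is left empty. A small model of the fill loop, with illustrative stand-ins for the two checks:

    #include <stdbool.h>
    #include <stddef.h>

    #define MIGRATE_PFN_VALID   (1UL << 0)
    #define MIGRATE_PFN_MIGRATE (1UL << 1)
    #define MIGRATE_PFN_SHIFT   6

    /* Illustrative stand-ins for get_page_unless_zero() and trylock_page() (lines 902-907). */
    static bool model_try_get(unsigned long pfn)  { return pfn != 0; }
    static bool model_try_lock(unsigned long pfn) { (void)pfn; return true; }

    static void model_device_range(unsigned long *src_pfns, unsigned long start, size_t npages)
    {
        unsigned long pfn;
        size_t i;

        for (pfn = start, i = 0; i < npages; pfn++, i++) {
            if (!model_try_get(pfn) || !model_try_lock(pfn)) {
                src_pfns[i] = 0;    /* slot left empty: this pfn is skipped entirely */
                continue;
            }
            src_pfns[i] = (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID | MIGRATE_PFN_MIGRATE;
        }
    }
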
927 int migrate_device_coherent_page(struct page *page) in migrate_device_coherent_page() argument
930 struct page *dpage; in migrate_device_coherent_page()
932 WARN_ON_ONCE(PageCompound(page)); in migrate_device_coherent_page()
934 lock_page(page); in migrate_device_coherent_page()
935 src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; in migrate_device_coherent_page()
954 copy_highpage(dpage, page); in migrate_device_coherent_page()
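
migrate_device_coherent_page() (lines 927-954 above) is a complete single-page user of the same machinery: lock the device-coherent page, build one-entry src/dst arrays, unmap, allocate a system page, copy the contents with copy_highpage(), then run the pages and finalize steps. Roughly, as a condensed paraphrase of the listed function (error handling and details trimmed):

    /* Condensed paraphrase of the listed function; not the verbatim kernel code. */
    static int coherent_page_to_system(struct page *page)
    {
        unsigned long src_pfn, dst_pfn = 0;
        struct page *dpage;

        lock_page(page);
        src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

        /* There is no VMA here, so the unmap helper is called directly. */
        migrate_device_unmap(&src_pfn, 1, NULL);
        if (!(src_pfn & MIGRATE_PFN_MIGRATE))
            return -EBUSY;

        dpage = alloc_page(GFP_USER | __GFP_NOWARN);
        if (dpage) {
            lock_page(dpage);
            dst_pfn = migrate_pfn(page_to_pfn(dpage));
        }

        migrate_device_pages(&src_pfn, &dst_pfn, 1);
        if (src_pfn & MIGRATE_PFN_MIGRATE)
            copy_highpage(dpage, page);    /* again, the caller performs the data copy */
        migrate_device_finalize(&src_pfn, &dst_pfn, 1);

        return (src_pfn & MIGRATE_PFN_MIGRATE) ? 0 : -EBUSY;
    }
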