Lines matching full:page in mm/migrate_device.c
74 struct page *page; in migrate_vma_collect_pmd() local
82 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
83 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
92 get_page(page); in migrate_vma_collect_pmd()
94 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
97 ret = split_huge_page(page); in migrate_vma_collect_pmd()
98 unlock_page(page); in migrate_vma_collect_pmd()
99 put_page(page); in migrate_vma_collect_pmd()
117 struct page *page; in migrate_vma_collect_pmd() local
133 * Only care about unaddressable device page special in migrate_vma_collect_pmd()
134 * page table entry. Other special swap entries are not in migrate_vma_collect_pmd()
135 * migratable, and we ignore regular swapped pages. in migrate_vma_collect_pmd()
141 page = pfn_swap_entry_to_page(entry); in migrate_vma_collect_pmd()
144 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
147 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
159 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
160 if (page && !is_zone_device_page(page) && in migrate_vma_collect_pmd()
163 else if (page && is_device_coherent_page(page) && in migrate_vma_collect_pmd()
165 page->pgmap->owner != migrate->pgmap_owner)) in migrate_vma_collect_pmd()
172 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
178 * By getting a reference on the page we pin it and that blocks in migrate_vma_collect_pmd()
182 * We drop this reference after isolating the page from the lru in migrate_vma_collect_pmd()
183 * for non-device pages (device pages are not on the lru and thus in migrate_vma_collect_pmd()
186 get_page(page); in migrate_vma_collect_pmd()
191 * page lock. If we can't immediately lock the page we fail this in migrate_vma_collect_pmd()
194 * If we can lock the page it's safe to set up a migration entry in migrate_vma_collect_pmd()
195 * now. In the common case where the page is mapped once in a in migrate_vma_collect_pmd()
200 if (trylock_page(page)) { in migrate_vma_collect_pmd()
205 anon_exclusive = PageAnon(page) && PageAnonExclusive(page); in migrate_vma_collect_pmd()
209 if (page_try_share_anon_rmap(page)) { in migrate_vma_collect_pmd()
211 unlock_page(page); in migrate_vma_collect_pmd()
212 put_page(page); in migrate_vma_collect_pmd()
224 folio_mark_dirty(page_folio(page)); in migrate_vma_collect_pmd()
226 /* Setup special migration page table entry */ in migrate_vma_collect_pmd()
229 page_to_pfn(page)); in migrate_vma_collect_pmd()
232 page_to_pfn(page)); in migrate_vma_collect_pmd()
235 page_to_pfn(page)); in migrate_vma_collect_pmd()
258 * drop page refcount. Page won't be freed, as we took in migrate_vma_collect_pmd()
261 page_remove_rmap(page, vma, false); in migrate_vma_collect_pmd()
262 put_page(page); in migrate_vma_collect_pmd()
267 put_page(page); in migrate_vma_collect_pmd()
295 * This will walk the CPU page table. For each virtual address backed by a
296 * valid page, it updates the src array and takes a reference on the page, in
297 * order to pin the page until we lock it and unmap it.
306 * private page mappings that won't be migrated. in migrate_vma_collect()
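Each address the collect walk visits ends up as one encoded entry in the src array. A minimal sketch of that encoding, using the MIGRATE_PFN_* helpers from include/linux/migrate.h (the helper name encode_src_pfn is illustrative, not from this file):

#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Illustrative only: migrate_pfn() shifts the pfn and sets
 * MIGRATE_PFN_VALID; the collect code then ORs in MIGRATE_PFN_MIGRATE
 * (and MIGRATE_PFN_WRITE for writable mappings) before storing the
 * result in migrate->src[i].
 */
static unsigned long encode_src_pfn(struct page *page, bool writable)
{
	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

	if (writable)
		mpfn |= MIGRATE_PFN_WRITE;
	return mpfn;
}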
321 * migrate_vma_check_page() - check if page is pinned or not
322 * @page: struct page to check
326 * ZONE_DEVICE page.
328 static bool migrate_vma_check_page(struct page *page, struct page *fault_page) in migrate_vma_check_page() argument
332 * isolate_lru_page() for a regular page, or migrate_vma_collect() for in migrate_vma_check_page()
333 * a device page. in migrate_vma_check_page()
335 int extra = 1 + (page == fault_page); in migrate_vma_check_page()
338 * FIXME support THP (transparent huge page), it is a bit more complex to in migrate_vma_check_page()
342 if (PageCompound(page)) in migrate_vma_check_page()
345 /* Pages from ZONE_DEVICE have one extra reference */ in migrate_vma_check_page()
346 if (is_zone_device_page(page)) in migrate_vma_check_page()
349 /* For file-backed pages */ in migrate_vma_check_page()
350 if (page_mapping(page)) in migrate_vma_check_page()
351 extra += 1 + page_has_private(page); in migrate_vma_check_page()
353 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
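Reassembled from the fragments above, the pin check is roughly the following (behaviorally equivalent sketch, not a verbatim copy of the file):

static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
	/* One reference from the isolation step, plus one if this is the
	 * page that triggered the fault handler. */
	int extra = 1 + (page == fault_page);

	/* FIXME in the original: THP is not supported here yet. */
	if (PageCompound(page))
		return false;

	/* ZONE_DEVICE pages carry one extra reference from their pgmap. */
	if (is_zone_device_page(page))
		extra++;

	/* File-backed pages: add the page cache reference and private data. */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	/* Any reference beyond the mappings being replaced means someone
	 * else pinned the page, so it cannot be migrated. */
	return (page_count(page) - extra) <= page_mapcount(page);
}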
365 struct page *fault_page) in migrate_device_unmap()
374 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_unmap() local
377 if (!page) { in migrate_device_unmap()
384 if (!is_zone_device_page(page)) { in migrate_device_unmap()
385 if (!PageLRU(page) && allow_drain) { in migrate_device_unmap()
391 if (isolate_lru_page(page)) { in migrate_device_unmap()
398 put_page(page); in migrate_device_unmap()
401 folio = page_folio(page); in migrate_device_unmap()
405 if (page_mapped(page) || in migrate_device_unmap()
406 !migrate_vma_check_page(page, fault_page)) { in migrate_device_unmap()
407 if (!is_zone_device_page(page)) { in migrate_device_unmap()
408 get_page(page); in migrate_device_unmap()
409 putback_lru_page(page); in migrate_device_unmap()
421 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_unmap() local
424 if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE)) in migrate_device_unmap()
427 folio = page_folio(page); in migrate_device_unmap()
440 * migrate_vma_unmap() - replace page mapping with special migration pte entry
443 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
448 * destination memory and copy contents of original page over to new page.
466 * and unmapped, check whether each page is pinned or not. Pages that aren't
475 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
480 * device memory to system memory. If the caller cannot migrate a device page
485 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
497 * then migrate_vma_pages() to migrate struct page information from the source
498 * struct page to the destination struct page. If it fails to migrate the
499 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
510 * It is safe to update device page table after migrate_vma_pages() because
511 * both destination and source page are still locked, and the mmap_lock is held
514 * Once the caller is done cleaning up things and updating its page table (if it
516 * migrate_vma_finalize() to update the CPU page table to point to new pages
517 * for successfully migrated pages or otherwise restore the CPU page table to
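Put together, the flow documented above corresponds roughly to this driver-side pattern (a sketch assuming migration of system memory to device-private memory; drv_alloc_device_page() and drv_copy_to_device() are hypothetical driver helpers, and a real driver sizes the pfn arrays to cover the whole range):

static int drv_migrate_to_device(struct vm_area_struct *vma,
				 unsigned long addr, void *pgmap_owner)
{
	unsigned long src_pfns[32] = { 0 }, dst_pfns[32] = { 0 };
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + 32 * PAGE_SIZE,
		.src		= src_pfns,
		.dst		= dst_pfns,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;

	/* Collect, pin, lock and unmap the source pages. */
	if (migrate_vma_setup(&args))
		return -EINVAL;

	for (i = 0; i < args.npages; i++) {
		struct page *dpage;

		/* Skip entries the core decided cannot be migrated. */
		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Hypothetical allocator; it must return the device page
		 * locked (zone_device_page_init() does this). */
		dpage = drv_alloc_device_page();
		if (!dpage)
			continue;
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Move struct page meta-data to the destination pages... */
	migrate_vma_pages(&args);
	/* ...copy data for entries still flagged MIGRATE_PFN_MIGRATE
	 * (hypothetical helper)... */
	drv_copy_to_device(args.src, args.dst, args.npages);
	/* ...then point the CPU page table at the new pages, or restore it. */
	migrate_vma_finalize(&args);
	return 0;
}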
565 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
566 * private or coherent page.
570 struct page *page, in migrate_vma_insert_page() argument
621 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
626 * preceding stores to the page contents become visible before in migrate_vma_insert_page()
629 __SetPageUptodate(page); in migrate_vma_insert_page()
631 if (is_device_private_page(page)) { in migrate_vma_insert_page()
636 page_to_pfn(page)); in migrate_vma_insert_page()
639 page_to_pfn(page)); in migrate_vma_insert_page()
642 if (is_zone_device_page(page) && in migrate_vma_insert_page()
643 !is_device_coherent_page(page)) { in migrate_vma_insert_page()
644 pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); in migrate_vma_insert_page()
647 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
674 page_add_new_anon_rmap(page, vma, addr); in migrate_vma_insert_page()
675 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
676 lru_cache_add_inactive_or_unevictable(page, vma); in migrate_vma_insert_page()
677 get_page(page); in migrate_vma_insert_page()
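The device-private branch above cannot install a normal present pte; it builds a device-private swap entry instead. A minimal sketch of that step (the wrapper name is illustrative; the *_device_private_entry() helpers are the ones referenced by the fragments):

/* Illustrative wrapper, not in the file: build the pte for a
 * device-private page, writable only if the VMA allows writes. */
static pte_t drv_mk_device_private_pte(struct vm_area_struct *vma,
				       struct page *page)
{
	swp_entry_t swp_entry;

	if (vma->vm_flags & VM_WRITE)
		swp_entry = make_writable_device_private_entry(page_to_pfn(page));
	else
		swp_entry = make_readable_device_private_entry(page_to_pfn(page));
	return swp_entry_to_pte(swp_entry);
}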
709 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); in __migrate_device_pages()
710 struct page *page = migrate_pfn_to_page(src_pfns[i]); in __migrate_device_pages() local
719 if (!page) { in __migrate_device_pages()
728 * called if the page could not be unmapped. in __migrate_device_pages()
746 mapping = page_mapping(page); in __migrate_device_pages()
760 * Other types of ZONE_DEVICE page are not supported. in __migrate_device_pages()
766 if (migrate && migrate->fault_page == page) in __migrate_device_pages()
768 page_folio(page), in __migrate_device_pages()
772 page_folio(page), MIGRATE_SYNC_NO_COPY); in __migrate_device_pages()
787 * migrate_device_pages() - migrate meta-data from src page to dst page
792 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
793 * meta-data from source struct page to destination.
803 * migrate_vma_pages() - migrate meta-data from src page to dst page
806 * This migrates struct page meta-data from source struct page to destination
807 * struct page. This effectively finishes the migration from source page to the
808 * destination page.
817 * migrate_device_finalize() - complete page migration
822 * Completes migration of the page by removing special migration entries.
823 * Drivers must ensure copying of page data is complete and visible to the CPU
833 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); in migrate_device_finalize()
834 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_finalize() local
836 if (!page) { in migrate_device_finalize()
849 newpage = page; in migrate_device_finalize()
852 src = page_folio(page); in migrate_device_finalize()
857 if (is_zone_device_page(page)) in migrate_device_finalize()
858 put_page(page); in migrate_device_finalize()
860 putback_lru_page(page); in migrate_device_finalize()
862 if (newpage != page) { in migrate_device_finalize()
874 * migrate_vma_finalize() - restore CPU page table entry
878 * new page if migration was successful for that page, or to the original page
901 * virtual mappings of every page that may be in device memory. For example this
915 struct page *page = pfn_to_page(pfn); in migrate_device_range() local
917 if (!get_page_unless_zero(page)) { in migrate_device_range()
922 if (!trylock_page(page)) { in migrate_device_range()
924 put_page(page); in migrate_device_range()
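A hedged sketch of how a driver might drive migrate_device_range() when its memory is going away, pairing it with migrate_device_pages() and migrate_device_finalize(); error handling is trimmed, and a real driver may need __GFP_NOFAIL for the destination allocations:

static void drv_evict_device_range(unsigned long start_pfn, unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;
	unsigned long i;

	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
	if (!src_pfns || !dst_pfns)
		goto out;

	/* Take a reference on, lock and unmap every device page in range. */
	if (migrate_device_range(src_pfns, start_pfn, npages))
		goto out;

	for (i = 0; i < npages; i++) {
		struct page *dpage;

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Destination is ordinary system memory and must be locked. */
		dpage = alloc_page(GFP_HIGHUSER);
		if (!dpage)
			continue;
		lock_page(dpage);
		copy_highpage(dpage, migrate_pfn_to_page(src_pfns[i]));
		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Transfer struct page meta-data, then drop the migration entries. */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
out:
	kfree(src_pfns);
	kfree(dst_pfns);
}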
938 * Migrate a device coherent page back to normal memory. The caller should have
939 * a reference on the page, which will be copied to the new page if migration is
942 int migrate_device_coherent_page(struct page *page) in migrate_device_coherent_page() argument
945 struct page *dpage; in migrate_device_coherent_page()
947 WARN_ON_ONCE(PageCompound(page)); in migrate_device_coherent_page()
949 lock_page(page); in migrate_device_coherent_page()
950 src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; in migrate_device_coherent_page()
953 * We don't have a VMA and don't need to walk the page tables to find in migrate_device_coherent_page()
954 * the source page. So call migrate_vma_unmap() directly to unmap the in migrate_device_coherent_page()
955 * page as migrate_vma_setup() will fail if args.vma == NULL. in migrate_device_coherent_page()
969 copy_highpage(dpage, page); in migrate_device_coherent_page()
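The rest of this helper is mostly elided by the matches above; a hedged reconstruction of how it proceeds, using the locals visible in the fragments plus an unsigned long dst_pfn initialized to 0:

	/* Unmap the single source page directly; there is no VMA to walk. */
	migrate_device_unmap(&src_pfn, 1, NULL);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	/* Allocate an ordinary system page as the destination. */
	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	/* If migration is still possible, move the meta-data, copy the
	 * data (the copy_highpage() line above), then finalize. */
	migrate_device_pages(&src_pfn, &dst_pfn, 1);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_device_finalize(&src_pfn, &dst_pfn, 1);

	return (src_pfn & MIGRATE_PFN_MIGRATE) ? 0 : -EBUSY;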