Lines matching full:page — identifier search hits, apparently from the Linux kernel's mm/migrate_device.c; each hit shows the kernel source line number, the matching line, and the enclosing function.
74 struct page *page; in migrate_vma_collect_pmd() local
82 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
83 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
89 get_page(page); in migrate_vma_collect_pmd()
91 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
94 ret = split_huge_page(page); in migrate_vma_collect_pmd()
95 unlock_page(page); in migrate_vma_collect_pmd()
96 put_page(page); in migrate_vma_collect_pmd()
110 struct page *page; in migrate_vma_collect_pmd() local
126 * Only care about unaddressable device page special in migrate_vma_collect_pmd()
127 * page table entries. Other special swap entries are not in migrate_vma_collect_pmd()
128 * migratable, and we ignore regular swapped pages. in migrate_vma_collect_pmd()
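
A sketch of the check these comment lines refer to (the surrounding lines are not shown here because they do not match the search): roughly, the non-present pte is converted to a swap entry and anything that is not a device private entry is skipped. This is only an illustrative reconstruction of the pattern, using the standard helpers from linux/swapops.h.

	entry = pte_to_swp_entry(pte);
	if (!is_device_private_entry(entry))
		goto next;	/* regular swap or other special entries are not migratable here */
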
134 page = pfn_swap_entry_to_page(entry); in migrate_vma_collect_pmd()
137 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
140 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
152 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
153 if (page && !is_zone_device_page(page) && in migrate_vma_collect_pmd()
156 else if (page && is_device_coherent_page(page) && in migrate_vma_collect_pmd()
158 page->pgmap->owner != migrate->pgmap_owner)) in migrate_vma_collect_pmd()
165 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
171 * By getting a reference on the page we pin it and that blocks in migrate_vma_collect_pmd()
175 * We drop this reference after isolating the page from the lru in migrate_vma_collect_pmd()
176 * for non-device pages (device pages are not on the lru and thus in migrate_vma_collect_pmd()
179 get_page(page); in migrate_vma_collect_pmd()
184 * page lock. If we can't immediately lock the page we fail this in migrate_vma_collect_pmd()
187 * If we can lock the page it's safe to set up a migration entry in migrate_vma_collect_pmd()
188 * now. In the common case where the page is mapped once in a in migrate_vma_collect_pmd()
193 if (trylock_page(page)) { in migrate_vma_collect_pmd()
198 anon_exclusive = PageAnon(page) && PageAnonExclusive(page); in migrate_vma_collect_pmd()
202 if (page_try_share_anon_rmap(page)) { in migrate_vma_collect_pmd()
204 unlock_page(page); in migrate_vma_collect_pmd()
205 put_page(page); in migrate_vma_collect_pmd()
217 folio_mark_dirty(page_folio(page)); in migrate_vma_collect_pmd()
219 /* Setup special migration page table entry */ in migrate_vma_collect_pmd()
222 page_to_pfn(page)); in migrate_vma_collect_pmd()
225 page_to_pfn(page)); in migrate_vma_collect_pmd()
228 page_to_pfn(page)); in migrate_vma_collect_pmd()
251 * drop page refcount. Page won't be freed, as we took in migrate_vma_collect_pmd()
254 page_remove_rmap(page, vma, false); in migrate_vma_collect_pmd()
255 put_page(page); in migrate_vma_collect_pmd()
260 put_page(page); in migrate_vma_collect_pmd()
289 * This will walk the CPU page table. For each virtual address backed by a
290 * valid page, it updates the src array and takes a reference on the page, in
291 * order to pin the page until we lock it and unmap it.
300 * private page mappings that won't be migrated. in migrate_vma_collect()
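
A minimal sketch of how the src (and dst) pfn arrays filled by this collection step are encoded and decoded, assuming only the helpers and flags from include/linux/migrate.h; the function and variable names are illustrative.

	#include <linux/migrate.h>

	static void example_inspect_src(unsigned long *src_pfns, unsigned long npages)
	{
		unsigned long i;

		for (i = 0; i < npages; i++) {
			/* migrate_pfn_to_page() strips the flag bits and returns the page */
			struct page *spage = migrate_pfn_to_page(src_pfns[i]);

			/* entries with a source page also carry MIGRATE_PFN_VALID and,
			 * when writable, MIGRATE_PFN_WRITE */
			if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
				continue;	/* collect/unmap decided this entry cannot move */

			if (!spage)
				continue;	/* pte_none()/zero-page hole: no source page to copy */

			/* ... a driver would pick a destination page for this entry here ... */
		}
	}
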
315 * migrate_vma_check_page() - check if page is pinned or not
316 * @page: struct page to check
320 * ZONE_DEVICE page.
322 static bool migrate_vma_check_page(struct page *page, struct page *fault_page) in migrate_vma_check_page() argument
326 * isolate_lru_page() for a regular page, or migrate_vma_collect() for in migrate_vma_check_page()
327 * a device page. in migrate_vma_check_page()
329 int extra = 1 + (page == fault_page); in migrate_vma_check_page()
332 * FIXME support THP (transparent huge page), it is bit more complex to in migrate_vma_check_page()
336 if (PageCompound(page)) in migrate_vma_check_page()
339 /* Pages from ZONE_DEVICE have one extra reference */ in migrate_vma_check_page()
340 if (is_zone_device_page(page)) in migrate_vma_check_page()
343 /* For file-backed pages */ in migrate_vma_check_page()
344 if (page_mapping(page)) in migrate_vma_check_page()
345 extra += 1 + page_has_private(page); in migrate_vma_check_page()
347 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
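
As a worked example of this check (counting loosely): for a regular anonymous page that was mapped only in the faulting process, the collect/unmap steps have already replaced the pte with a migration entry and dropped the mapping's reference by the time this runs, so page_mapcount() is 0 and page_count() is 1 (the reference taken when the page was isolated, which is the baseline extra of 1 for a non-fault page). (1 - 1) > 0 is false, so the page is treated as unpinned and may migrate. Any additional reference, for instance a pin taken via get_user_pages(), makes page_count() 2, the inequality (2 - 1) > 0 becomes true, and migration of that entry is abandoned.
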
359 struct page *fault_page) in migrate_device_unmap()
368 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_unmap() local
371 if (!page) { in migrate_device_unmap()
378 if (!is_zone_device_page(page)) { in migrate_device_unmap()
379 if (!PageLRU(page) && allow_drain) { in migrate_device_unmap()
385 if (!isolate_lru_page(page)) { in migrate_device_unmap()
392 put_page(page); in migrate_device_unmap()
395 folio = page_folio(page); in migrate_device_unmap()
399 if (page_mapped(page) || in migrate_device_unmap()
400 !migrate_vma_check_page(page, fault_page)) { in migrate_device_unmap()
401 if (!is_zone_device_page(page)) { in migrate_device_unmap()
402 get_page(page); in migrate_device_unmap()
403 putback_lru_page(page); in migrate_device_unmap()
415 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_unmap() local
418 if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE)) in migrate_device_unmap()
421 folio = page_folio(page); in migrate_device_unmap()
434 * migrate_vma_unmap() - replace page mapping with special migration pte entry
437 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
442 * destination memory and copy contents of original page over to new page.
460 * and unmapped, check whether each page is pinned or not. Pages that aren't
469 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
474 * device memory to system memory. If the caller cannot migrate a device page
479 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
491 * then migrate_vma_pages() to migrate struct page information from the source
492 * struct page to the destination struct page. If it fails to migrate the
493 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
504 * It is safe to update device page table after migrate_vma_pages() because
505 * both destination and source page are still locked, and the mmap_lock is held
508 * Once the caller is done cleaning up things and updating its page table (if it
510 * migrate_vma_finalize() to update the CPU page table to point to new pages
511 * for successfully migrated pages or otherwise restore the CPU page table to
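
A condensed sketch of the driver-side flow the comments above describe, loosely following Documentation/mm/hmm.rst. The my_drv_* names are hypothetical stand-ins for driver code; struct migrate_vma, migrate_vma_setup(), migrate_vma_pages(), migrate_vma_finalize() and the MIGRATE_* flags are the interface declared in include/linux/migrate.h. For brevity it assumes a single VMA and at most 64 pages.

	struct page *my_drv_alloc_device_page(void);	/* hypothetical: returns a locked ZONE_DEVICE page */
	void my_drv_copy_to_device(struct page *dpage, struct page *spage);	/* hypothetical */
	void my_drv_clear_device_page(struct page *dpage);	/* hypothetical */

	static int my_drv_migrate_to_device(struct vm_area_struct *vma,
					    unsigned long start, unsigned long end,
					    void *pgmap_owner)
	{
		unsigned long npages = (end - start) >> PAGE_SHIFT;
		unsigned long src[64] = { 0 }, dst[64] = { 0 };
		struct migrate_vma args = {
			.vma		= vma,
			.src		= src,
			.dst		= dst,
			.start		= start,
			.end		= end,
			.pgmap_owner	= pgmap_owner,
			.flags		= MIGRATE_VMA_SELECT_SYSTEM,
		};
		unsigned long i;
		int ret;

		if (npages > 64)
			return -EINVAL;

		/* Collect and unmap the source pages; args.src is filled in here. */
		ret = migrate_vma_setup(&args);
		if (ret)
			return ret;
		if (!args.cpages)
			return 0;		/* nothing could be collected */

		/* Allocate destination device pages and copy the data over. */
		for (i = 0; i < npages; i++) {
			struct page *spage = migrate_pfn_to_page(src[i]);
			struct page *dpage;

			if (!(src[i] & MIGRATE_PFN_MIGRATE))
				continue;	/* this entry cannot be migrated */

			dpage = my_drv_alloc_device_page();
			if (!dpage)
				continue;	/* leaving dst[i] == 0 skips this page */

			if (spage)
				my_drv_copy_to_device(dpage, spage);
			else
				my_drv_clear_device_page(dpage);	/* pte_none()/zero-page hole */

			dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_VALID;
		}

		/* Move struct page metadata, then restore the CPU page table. */
		migrate_vma_pages(&args);
		migrate_vma_finalize(&args);
		return 0;
	}
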
559 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
560 * private or coherent page.
564 struct page *page, in migrate_vma_insert_page() argument
599 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
604 * preceding stores to the page contents become visible before in migrate_vma_insert_page()
607 __SetPageUptodate(page); in migrate_vma_insert_page()
609 if (is_device_private_page(page)) { in migrate_vma_insert_page()
614 page_to_pfn(page)); in migrate_vma_insert_page()
617 page_to_pfn(page)); in migrate_vma_insert_page()
620 if (is_zone_device_page(page) && in migrate_vma_insert_page()
621 !is_device_coherent_page(page)) { in migrate_vma_insert_page()
622 pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); in migrate_vma_insert_page()
625 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
655 page_add_new_anon_rmap(page, vma, addr); in migrate_vma_insert_page()
656 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
657 lru_cache_add_inactive_or_unevictable(page, vma); in migrate_vma_insert_page()
658 get_page(page); in migrate_vma_insert_page()
690 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); in __migrate_device_pages()
691 struct page *page = migrate_pfn_to_page(src_pfns[i]); in __migrate_device_pages() local
700 if (!page) { in __migrate_device_pages()
709 * called if the page could not be unmapped. in __migrate_device_pages()
727 mapping = page_mapping(page); in __migrate_device_pages()
734 folio = page_folio(page); in __migrate_device_pages()
750 * Other types of ZONE_DEVICE page are not supported. in __migrate_device_pages()
756 if (migrate && migrate->fault_page == page) in __migrate_device_pages()
758 page_folio(page), in __migrate_device_pages()
762 page_folio(page), MIGRATE_SYNC_NO_COPY); in __migrate_device_pages()
772 * migrate_device_pages() - migrate meta-data from src page to dst page
777 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
778 * meta-data from source struct page to destination.
788 * migrate_vma_pages() - migrate meta-data from src page to dst page
791 * This migrates struct page meta-data from source struct page to destination
792 * struct page. This effectively finishes the migration from source page to the
793 * destination page.
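
A short fragment continuing the sketch above, illustrating the point made in the comments: entries that lost MIGRATE_PFN_MIGRATE after migrate_vma_pages() did not migrate, and the device page table can be updated safely before migrate_vma_finalize() because source and destination pages are still locked. my_drv_map_device_page() is a hypothetical driver helper.

	migrate_vma_pages(&args);

	for (i = 0; i < npages; i++) {
		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;	/* this page was not migrated */
		my_drv_map_device_page(args.start + (i << PAGE_SHIFT),
				       migrate_pfn_to_page(args.dst[i]));
	}

	migrate_vma_finalize(&args);
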
802 * migrate_device_finalize() - complete page migration
807 * Completes migration of the page by removing special migration entries.
808 * Drivers must ensure copying of page data is complete and visible to the CPU
818 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); in migrate_device_finalize()
819 struct page *page = migrate_pfn_to_page(src_pfns[i]); in migrate_device_finalize() local
821 if (!page) { in migrate_device_finalize()
834 newpage = page; in migrate_device_finalize()
837 src = page_folio(page); in migrate_device_finalize()
842 if (is_zone_device_page(page)) in migrate_device_finalize()
843 put_page(page); in migrate_device_finalize()
845 putback_lru_page(page); in migrate_device_finalize()
847 if (newpage != page) { in migrate_device_finalize()
859 * migrate_vma_finalize() - restore CPU page table entry
863 * new page if migration was successful for that page, or to the original page
886 * virtual mappings of every page that may be in device memory. For example this
900 struct page *page = pfn_to_page(pfn); in migrate_device_range() local
902 if (!get_page_unless_zero(page)) { in migrate_device_range()
907 if (!trylock_page(page)) { in migrate_device_range()
909 put_page(page); in migrate_device_range()
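
A sketch of the pfn-range eviction flow that migrate_device_range() above is intended for, loosely modelled on how a driver evicts a chunk of device memory: collect and unmap the device private pages by pfn, copy them into freshly allocated system pages, then complete the migration with migrate_device_pages() and migrate_device_finalize(). my_drv_copy_from_device() is a hypothetical helper; the migrate_device_* calls are the interface shown above.

	void my_drv_copy_from_device(struct page *dpage, struct page *dev_page);	/* hypothetical */

	static void my_drv_evict_range(unsigned long dev_pfn_first, unsigned long npages)
	{
		unsigned long *src, *dst, i;

		src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
		dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
		if (!src || !dst)
			goto out;

		/* Collect and unmap every device private page in the pfn range. */
		if (migrate_device_range(src, dev_pfn_first, npages))
			goto out;

		for (i = 0; i < npages; i++) {
			struct page *dpage;

			if (!(src[i] & MIGRATE_PFN_MIGRATE))
				continue;

			/* Allocate a system page as the destination and copy back. */
			dpage = alloc_page(GFP_HIGHUSER);
			if (!dpage)
				continue;
			lock_page(dpage);	/* destination pages must be locked */
			my_drv_copy_from_device(dpage, migrate_pfn_to_page(src[i]));
			dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_VALID;
		}

		migrate_device_pages(src, dst, npages);
		migrate_device_finalize(src, dst, npages);
	out:
		kfree(src);
		kfree(dst);
	}
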
923 * Migrate a device coherent page back to normal memory. The caller should have
924 * a reference on the page, which will be copied to the new page if migration is
927 int migrate_device_coherent_page(struct page *page) in migrate_device_coherent_page() argument
930 struct page *dpage; in migrate_device_coherent_page()
932 WARN_ON_ONCE(PageCompound(page)); in migrate_device_coherent_page()
934 lock_page(page); in migrate_device_coherent_page()
935 src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; in migrate_device_coherent_page()
938 * We don't have a VMA and don't need to walk the page tables to find in migrate_device_coherent_page()
939 * the source page. So call migrate_vma_unmap() directly to unmap the in migrate_device_coherent_page()
940 * page as migrate_vma_setup() will fail if args.vma == NULL. in migrate_device_coherent_page()
954 copy_highpage(dpage, page); in migrate_device_coherent_page()
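
A rough sketch of the kind of caller migrate_device_coherent_page() serves, loosely modelled on the long-term pinning path in mm/gup.c, with the surrounding reference handling simplified; folio here stands for a device coherent folio that path has found.

	/* fragment: folio is a device coherent folio the pinning path cannot keep pinned */
	if (folio_is_device_coherent(folio)) {
		/* hold a plain reference across the call, as the comment above requires */
		folio_get(folio);
		if (migrate_device_coherent_page(&folio->page))
			return -EBUSY;	/* the page was pinned and could not be migrated */
	}
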