Lines matching the full word "page"
13 * Handles page cache pages in various states. The tricky part
14 * here is that we can access any page asynchronously with respect to
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/vm/page-types when running a real workload.
41 #include <linux/page-flags.h>
42 #include <linux/kernel-page-flags.h>
62 #include <linux/page-isolation.h>
79 * 1: the page is dissolved (if needed) and taken off from buddy,
80 * 0: the page is dissolved (if needed) and not taken off from buddy,
83 static int __page_handle_poison(struct page *page) in __page_handle_poison() argument
87 zone_pcp_disable(page_zone(page)); in __page_handle_poison()
88 ret = dissolve_free_huge_page(page); in __page_handle_poison()
90 ret = take_page_off_buddy(page); in __page_handle_poison()
91 zone_pcp_enable(page_zone(page)); in __page_handle_poison()
96 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) in page_handle_poison() argument
103 if (__page_handle_poison(page) <= 0) in page_handle_poison()
105 * We could fail to take off the target page from buddy in page_handle_poison()
106 * for example due to racy page allocation, but that's in page_handle_poison()
107 * acceptable because soft-offlined page is not broken in page_handle_poison()
114 SetPageHWPoison(page); in page_handle_poison()
116 put_page(page); in page_handle_poison()
117 page_ref_inc(page); in page_handle_poison()
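
Taken together, the fragments above suggest page_handle_poison() only marks the page poisoned once the dissolve/take-off-buddy step has succeeded, then pins it with an extra reference. A minimal userspace sketch of that control flow, assuming simplified semantics; the struct and helpers are toy stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct page: just the fields the flow needs. */
struct toy_page {
	int  refcount;
	bool hwpoison;
	bool free_in_buddy;
};

/* Pretend to dissolve a huge page and pull it off the free list.
 * >0: taken off buddy, 0: not taken off (e.g. lost a race), <0: error. */
static int toy_handle_poison(struct toy_page *p)
{
	if (!p->free_in_buddy)
		return 0;
	p->free_in_buddy = false;
	return 1;
}

static bool toy_page_handle_poison(struct toy_page *p,
				   bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage && toy_handle_poison(p) <= 0)
		return false;          /* not broken: acceptable to give up */

	p->hwpoison = true;            /* SetPageHWPoison() analogue */
	if (release)
		p->refcount--;         /* put_page() analogue */
	p->refcount++;                 /* keep one reference pinning the page */
	return true;
}

int main(void)
{
	struct toy_page p = { .refcount = 1, .free_in_buddy = true };

	printf("handled: %d\n", toy_page_handle_poison(&p, true, false)); /* 1 */
	printf("refcount=%d hwpoison=%d\n", p.refcount, p.hwpoison);
	return 0;
}
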
136 static int hwpoison_filter_dev(struct page *p) in hwpoison_filter_dev()
160 static int hwpoison_filter_flags(struct page *p) in hwpoison_filter_flags()
179 * can only guarantee that the page either belongs to the memcg tasks, or is
180 * a freed page.
185 static int hwpoison_filter_task(struct page *p) in hwpoison_filter_task()
196 static int hwpoison_filter_task(struct page *p) { return 0; } in hwpoison_filter_task()
199 int hwpoison_filter(struct page *p) in hwpoison_filter()
216 int hwpoison_filter(struct page *p) in hwpoison_filter()
225 * Kill all processes that have a poisoned page mapped and then isolate
226 * the page.
229 * Find all processes having the page mapped and kill them.
230 * But we keep a page reference around so that the page is not
232 * Then stash the page away
254 * Send all the processes who have the page mapped a signal.
272 * Signal other processes sharing the page if they have in kill_proc()
288 * Unknown page type encountered. Try to check whether it can turn PageLRU by
291 void shake_page(struct page *p) in shake_page()
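
shake_page() is described as trying to coax a page of unknown type into showing up as PageLRU, presumably by draining the per-CPU LRU-add caches. A rough standalone model of why draining helps (toy structures, not the kernel's pagevec/folio_batch API):

#include <stdbool.h>
#include <stdio.h>

#define PVEC_SIZE 4

struct toy_page { bool on_lru; };

/* Per-CPU staging buffer: pages parked here are not yet "PageLRU". */
struct toy_pvec {
	struct toy_page *pages[PVEC_SIZE];
	int nr;
};

static void toy_lru_add(struct toy_pvec *pvec, struct toy_page *p)
{
	pvec->pages[pvec->nr++] = p;    /* deferred: p->on_lru stays false */
}

static void toy_lru_drain(struct toy_pvec *pvec)
{
	for (int i = 0; i < pvec->nr; i++)
		pvec->pages[i]->on_lru = true;   /* now visible on the LRU */
	pvec->nr = 0;
}

int main(void)
{
	struct toy_pvec pvec = { .nr = 0 };
	struct toy_page p = { .on_lru = false };

	toy_lru_add(&pvec, &p);
	printf("before drain: on_lru=%d\n", p.on_lru);  /* 0: looks "unknown" */
	toy_lru_drain(&pvec);                           /* shake_page() analogue */
	printf("after drain:  on_lru=%d\n", p.on_lru);  /* 1: now handlable */
	return 0;
}
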
354 * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
356 * memory_failure event. In all other cases, page->index and
357 * page->mapping are sufficient for mapping the page back to its
360 static void add_to_kill(struct task_struct *tsk, struct page *p, in add_to_kill()
384 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times in add_to_kill()
386 * contain mappings for the page, but at least one VMA does. in add_to_kill()
388 * has a mapping for the page. in add_to_kill()
424 pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", in kill_procs()
472 * to be signaled when some page under the process is hwpoisoned.
479 * processes sharing the same error page, if the process is "early kill", the
498 * Collect processes when the error hit an anonymous page.
500 static void collect_procs_anon(struct page *page, struct list_head *to_kill, in collect_procs_anon() argument
503 struct folio *folio = page_folio(page); in collect_procs_anon()
513 pgoff = page_to_pgoff(page); in collect_procs_anon()
526 if (!page_mapped_in_vma(page, vma)) in collect_procs_anon()
528 add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill); in collect_procs_anon()
536 * Collect processes when the error hit a file mapped page.
538 static void collect_procs_file(struct page *page, struct list_head *to_kill, in collect_procs_file() argument
543 struct address_space *mapping = page->mapping; in collect_procs_file()
548 pgoff = page_to_pgoff(page); in collect_procs_file()
558 * the page but the corrupted page is not necessarily in collect_procs_file()
564 add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, in collect_procs_file()
574 * Collect processes when the error hit a fsdax page.
576 static void collect_procs_fsdax(struct page *page, in collect_procs_fsdax() argument
592 add_to_kill(t, page, pgoff, vma, to_kill); in collect_procs_fsdax()
601 * Collect the processes who have the corrupted page mapped to kill.
603 static void collect_procs(struct page *page, struct list_head *tokill, in collect_procs() argument
606 if (!page->mapping) in collect_procs()
609 if (PageAnon(page)) in collect_procs()
610 collect_procs_anon(page, tokill, force_early); in collect_procs()
612 collect_procs_file(page, tokill, force_early); in collect_procs()
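
The collect_procs*() fragments imply a simple dispatch: bail out if the page has no mapping, walk the anon_vma chain for anonymous pages, walk the file mapping's VMA tree otherwise, and append every task whose VMA covers the faulting offset to a to-kill list. The sketch below collapses both walks into one loop over hypothetical VMA records; none of these types are the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct toy_task { const char *comm; };

struct toy_vma {
	struct toy_task *task;
	long pgoff_start, pgoff_end;    /* inclusive range of file offsets */
};

struct toy_page {
	bool has_mapping;
	long pgoff;
	struct toy_vma *vmas;           /* all VMAs that might map this page */
	int nr_vmas;
};

static void add_to_kill(struct toy_task **to_kill, int *nr, struct toy_task *t)
{
	to_kill[(*nr)++] = t;
}

/* Pick every VMA whose offset range covers the poisoned page. */
static void collect_procs(struct toy_page *p, struct toy_task **to_kill, int *nr)
{
	if (!p->has_mapping)
		return;                 /* nobody to notify */
	for (int i = 0; i < p->nr_vmas; i++) {
		struct toy_vma *v = &p->vmas[i];

		if (p->pgoff >= v->pgoff_start && p->pgoff <= v->pgoff_end)
			add_to_kill(to_kill, nr, v->task);
	}
}

int main(void)
{
	struct toy_task a = { "proc-a" }, b = { "proc-b" };
	struct toy_vma vmas[] = { { &a, 0, 10 }, { &b, 20, 30 } };
	struct toy_page p = { .has_mapping = true, .pgoff = 5,
			      .vmas = vmas, .nr_vmas = 2 };
	struct toy_task *kill[2];
	int nr = 0;

	collect_procs(&p, kill, &nr);
	for (int i = 0; i < nr; i++)
		printf("would signal %s\n", kill[i]->comm);  /* proc-a only */
	return 0;
}
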
732 * memory_failure() failed to unmap the error page at the first call, or
736 * so this function walks page table to find it. The returned virtual address
738 * process has multiple entries mapping the error page.
771 [MF_MSG_KERNEL] = "reserved kernel page",
772 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
773 [MF_MSG_SLAB] = "kernel slab page",
774 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
775 [MF_MSG_HUGE] = "huge page",
776 [MF_MSG_FREE_HUGE] = "free huge page",
777 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
778 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
779 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
780 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
781 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
782 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
783 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
784 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
785 [MF_MSG_CLEAN_LRU] = "clean LRU page",
786 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
787 [MF_MSG_BUDDY] = "free buddy page",
788 [MF_MSG_DAX] = "dax page",
790 [MF_MSG_UNKNOWN] = "unknown page",
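
These entries form a designated-initializer table mapping page-state identifiers to the strings used in recovery-action messages. A compilable miniature of the pattern, with a trimmed enum and made-up values:

#include <stdio.h>

enum mf_msg {
	MF_MSG_KERNEL,
	MF_MSG_HUGE,
	MF_MSG_DIRTY_LRU,
	MF_MSG_BUDDY,
	MF_MSG_UNKNOWN,
	MF_MSG_COUNT,
};

/* Designated initializers keep the table readable and safe to reorder. */
static const char * const action_page_types[MF_MSG_COUNT] = {
	[MF_MSG_KERNEL]    = "reserved kernel page",
	[MF_MSG_HUGE]      = "huge page",
	[MF_MSG_DIRTY_LRU] = "dirty LRU page",
	[MF_MSG_BUDDY]     = "free buddy page",
	[MF_MSG_UNKNOWN]   = "unknown page",
};

int main(void)
{
	unsigned long pfn = 0x1234;
	enum mf_msg type = MF_MSG_DIRTY_LRU;

	printf("%#lx: recovery action for %s\n", pfn, action_page_types[type]);
	return 0;
}
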
794 * XXX: It is possible that a page is isolated from LRU cache,
795 * and then kept in swap cache or failed to remove from page cache.
796 * The page count will stop it from being freed by unpoison.
799 static int delete_from_lru_cache(struct page *p) in delete_from_lru_cache()
803 * Clear sensible page flags, so that the buddy system won't in delete_from_lru_cache()
804 * complain when the page is unpoison-and-freed. in delete_from_lru_cache()
810 * Poisoned page might never drop its ref count to 0 so we have in delete_from_lru_cache()
816 * drop the page count elevated by isolate_lru_page() in delete_from_lru_cache()
824 static int truncate_error_page(struct page *p, unsigned long pfn, in truncate_error_page()
833 pr_info("%#lx: Failed to punch page: %d\n", pfn, err); in truncate_error_page()
859 /* Callback ->action() has to unlock the relevant page inside it. */
860 int (*action)(struct page_state *ps, struct page *p);
864 * Return true if page is still referenced by others, otherwise return
869 static bool has_extra_refcount(struct page_state *ps, struct page *p, in has_extra_refcount()
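
The page_state fragment points at a mask/result dispatch table: each entry says which page-flag bits must match and which handler to run, with a catch-all "unknown" entry so the scan always terminates. A standalone model of that lookup, with invented flag bits and handlers:

#include <stdio.h>

#define F_LRU   (1UL << 0)
#define F_DIRTY (1UL << 1)

struct toy_page { unsigned long flags; };

struct page_state {
	unsigned long mask;   /* which flag bits to look at          */
	unsigned long res;    /* what those bits must equal to match */
	const char *msg;
	int (*action)(const struct page_state *ps, struct toy_page *p);
};

static int me_lru(const struct page_state *ps, struct toy_page *p)
{
	printf("handling %s\n", ps->msg);
	return 0;
}

static int me_unknown(const struct page_state *ps, struct toy_page *p)
{
	printf("unknown page state %#lx\n", p->flags);
	return -1;
}

static const struct page_state error_states[] = {
	{ F_LRU | F_DIRTY, F_LRU | F_DIRTY, "dirty LRU page", me_lru },
	{ F_LRU | F_DIRTY, F_LRU,           "clean LRU page", me_lru },
	{ 0, 0, "unknown page", me_unknown },   /* catch-all terminator */
};

int main(void)
{
	struct toy_page p = { .flags = F_LRU | F_DIRTY };
	const struct page_state *ps;

	/* First entry whose masked bits match wins; {0,0} always matches. */
	for (ps = error_states; (p.flags & ps->mask) != ps->res; ps++)
		;
	return ps->action(ps, &p);
}
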
887 * Error hit kernel page.
891 static int me_kernel(struct page_state *ps, struct page *p) in me_kernel()
898 * Page in unknown state. Do nothing.
900 static int me_unknown(struct page_state *ps, struct page *p) in me_unknown()
902 pr_err("%#lx: Unknown page state\n", page_to_pfn(p)); in me_unknown()
908 * Clean (or cleaned) page cache page.
910 static int me_pagecache_clean(struct page_state *ps, struct page *p) in me_pagecache_clean()
928 * Now truncate the page in the page cache. This is really in me_pagecache_clean()
937 * Page has been torn down in the meanwhile in me_pagecache_clean()
944 * The shmem page is kept in page cache instead of truncating in me_pagecache_clean()
965 * Dirty pagecache page
966 * Issues: when the error hits a hole page, the error is not properly
969 static int me_pagecache_dirty(struct page_state *ps, struct page *p) in me_pagecache_dirty()
988 * and then through the PageError flag in the page. in me_pagecache_dirty()
995 * when the page is reread or dropped. If an in me_pagecache_dirty()
998 * and the page is dropped between then the error in me_pagecache_dirty()
1003 * report through AS_EIO) or when the page is dropped in me_pagecache_dirty()
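
me_pagecache_dirty() appears to worry about how the I/O error reaches user space: an error flag on the mapping (the AS_EIO route mentioned above) is consumed by the next fsync(), so if nobody syncs before the page is dropped, the report is lost. A tiny "report once, then forget" model, not the kernel's mapping flags:

#include <stdbool.h>
#include <stdio.h>

struct toy_mapping { bool as_eio; };

/* Memory failure on a dirty page: remember that writeback "failed". */
static void note_io_error(struct toy_mapping *m)
{
	m->as_eio = true;
}

/* fsync() reports the pending error exactly once, then clears it. */
static int toy_fsync(struct toy_mapping *m)
{
	if (m->as_eio) {
		m->as_eio = false;
		return -5;              /* -EIO */
	}
	return 0;
}

int main(void)
{
	struct toy_mapping m = { false };

	note_io_error(&m);
	printf("first fsync:  %d\n", toy_fsync(&m));   /* -5: error reported */
	printf("second fsync: %d\n", toy_fsync(&m));   /* 0: already consumed */
	return 0;
}
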
1019 * Dirty swap cache page is tricky to handle. The page could live both in page
1020 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
1028 * a later page fault, we know the application is accessing
1032 * Clean swap cache pages can be directly isolated. A later page fault will
1035 static int me_swapcache_dirty(struct page_state *ps, struct page *p) in me_swapcache_dirty()
1056 static int me_swapcache_clean(struct page_state *ps, struct page *p) in me_swapcache_clean()
1075 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
1076 * To narrow down kill region to one page, we need to break up pmd.
1078 static int me_huge_page(struct page_state *ps, struct page *p) in me_huge_page()
1081 struct page *hpage = compound_head(p); in me_huge_page()
1091 /* The page is kept in page cache. */ in me_huge_page()
1117 * Various page states we can handle.
1119 * A page state is defined by its current page->flags bits.
1122 * This is quite tricky because we can access page at any time
1146 * Could in theory check if slab page is free or if we can drop
1183 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1195 static int page_action(struct page_state *ps, struct page *p, in page_action()
1200 /* page p should be unlocked after returning from ps->action(). */ in page_action()
1205 /* Could do more checks here if page looks ok */ in page_action()
1207 * Could adjust zone counters here to correct for the missing page. in page_action()
1213 static inline bool PageHWPoisonTakenOff(struct page *page) in PageHWPoisonTakenOff() argument
1215 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON; in PageHWPoisonTakenOff()
1218 void SetPageHWPoisonTakenOff(struct page *page) in SetPageHWPoisonTakenOff() argument
1220 set_page_private(page, MAGIC_HWPOISON); in SetPageHWPoisonTakenOff()
1223 void ClearPageHWPoisonTakenOff(struct page *page) in ClearPageHWPoisonTakenOff() argument
1225 if (PageHWPoison(page)) in ClearPageHWPoisonTakenOff()
1226 set_page_private(page, 0); in ClearPageHWPoisonTakenOff()
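
The TakenOff helpers seem to reuse the page's private field as a marker: a poisoned page that was also pulled off the buddy list stores a magic value there, so unpoison can tell the two cases apart later. A small standalone analogue (the magic constant is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TOY_MAGIC_HWPOISON 0x48575053UL   /* arbitrary marker for this sketch */

struct toy_page {
	bool hwpoison;
	unsigned long private;
};

static bool toy_hwpoison_taken_off(struct toy_page *p)
{
	return p->hwpoison && p->private == TOY_MAGIC_HWPOISON;
}

static void toy_set_taken_off(struct toy_page *p)
{
	p->private = TOY_MAGIC_HWPOISON;
}

static void toy_clear_taken_off(struct toy_page *p)
{
	if (p->hwpoison)
		p->private = 0;
}

int main(void)
{
	struct toy_page p = { .hwpoison = true, .private = 0 };

	toy_set_taken_off(&p);
	printf("taken off? %d\n", toy_hwpoison_taken_off(&p));  /* 1 */
	toy_clear_taken_off(&p);
	printf("taken off? %d\n", toy_hwpoison_taken_off(&p));  /* 0 */
	return 0;
}
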
1230 * Return true if a page type of a given page is supported by hwpoison
1235 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) in HWPoisonHandlable() argument
1238 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) in HWPoisonHandlable()
1241 return PageLRU(page) || is_free_buddy_page(page); in HWPoisonHandlable()
1244 static int __get_hwpoison_page(struct page *page, unsigned long flags) in __get_hwpoison_page() argument
1246 struct page *head = compound_head(page); in __get_hwpoison_page()
1256 * unsupported type of page in order to reduce the risk of unexpected in __get_hwpoison_page()
1257 * races caused by taking a page refcount. in __get_hwpoison_page()
1263 if (head == compound_head(page)) in __get_hwpoison_page()
1266 pr_info("%#lx cannot catch tail\n", page_to_pfn(page)); in __get_hwpoison_page()
1273 static int get_any_page(struct page *p, unsigned long flags) in get_any_page()
1300 * page, retry. in get_any_page()
1315 * A page we cannot handle. Check whether we can turn in get_any_page()
1329 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p)); in get_any_page()
1334 static int __get_unpoison_page(struct page *page) in __get_unpoison_page() argument
1336 struct page *head = compound_head(page); in __get_unpoison_page()
1349 if (PageHWPoisonTakenOff(page)) in __get_unpoison_page()
1352 return get_page_unless_zero(page) ? 1 : 0; in __get_unpoison_page()
1357 * @p: Raw error page (hit by memory error)
1360 * get_hwpoison_page() takes a page refcount of an error page to handle memory
1361 * error on it, after checking that the error page is in a well-defined state
1362 * (defined as a page-type we can successfully handle the memory error on it,
1363 * such as LRU page and hugetlb page).
1365 * Memory error handling could be triggered at any time on any type of page,
1368 * extra care for the error page's state (as done in __get_hwpoison_page()),
1372 * the given page has PG_hwpoison. So it's never reused for other page
1378 * -EBUSY when get_hwpoison_page() has raced with page lifecycle
1380 * -EHWPOISON when the page is hwpoisoned and taken off from buddy.
1382 static int get_hwpoison_page(struct page *p, unsigned long flags) in get_hwpoison_page()
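
Going by the doc-comment fragments, get_hwpoison_page() distinguishes four outcomes: 1 when a reference was taken, 0 for a free or unhandlable page, -EBUSY when it raced with the page lifecycle, and -EHWPOISON when the page is already poisoned and off the buddy list. A caller-side sketch of consuming such a contract (toy function and locally defined errno-style values):

#include <stdio.h>

/* Local stand-ins for errno values, defined here to keep the sketch
 * self-contained. */
#define EBUSY     16
#define EHWPOISON 133

/* Toy stand-in that just replays a canned result. */
static int toy_get_hwpoison_page(int canned_result)
{
	return canned_result;
}

static int handle_pfn(int canned_result)
{
	int ret = toy_get_hwpoison_page(canned_result);

	switch (ret) {
	case 1:
		printf("got a reference: handle the page, then drop it\n");
		return 0;
	case 0:
		printf("free or unhandlable page: no reference taken\n");
		return 0;
	case -EBUSY:
		printf("raced with page lifecycle: worth retrying\n");
		return -EBUSY;
	case -EHWPOISON:
		printf("already poisoned and off the buddy list\n");
		return -EHWPOISON;
	default:
		return ret;
	}
}

int main(void)
{
	handle_pfn(1);
	handle_pfn(-EBUSY);
	return 0;
}
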
1400 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, in hwpoison_user_mappings()
1401 int flags, struct page *hpage) in hwpoison_user_mappings()
1433 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); in hwpoison_user_mappings()
1438 * Propagate the dirty bit from PTEs to struct page first, because we in hwpoison_user_mappings()
1439 * need this to decide if we should kill or just drop the page. in hwpoison_user_mappings()
1441 * be called inside page lock (it's recommended but not enforced). in hwpoison_user_mappings()
1450 pr_info("%#lx: corrupted page was clean: dropped without side effects\n", in hwpoison_user_mappings()
1456 * First collect all the processes that have the page in hwpoison_user_mappings()
1475 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn); in hwpoison_user_mappings()
1482 pr_err("%#lx: failed to unmap page (mapcount=%d)\n", in hwpoison_user_mappings()
1486 * try_to_unmap() might put mlocked page in lru cache, so call in hwpoison_user_mappings()
1494 * struct page and all unmaps done we can decide if in hwpoison_user_mappings()
1495 * killing is needed or not. Only kill when the page in hwpoison_user_mappings()
1509 static int identify_page_state(unsigned long pfn, struct page *p, in identify_page_state()
1515 * The first check uses the current page flags which may not have any in identify_page_state()
1516 * relevant information. The second check with the saved page flags is in identify_page_state()
1517 * carried out only if the first check can't determine the page status. in identify_page_state()
1532 static int try_to_split_thp_page(struct page *page) in try_to_split_thp_page() argument
1536 lock_page(page); in try_to_split_thp_page()
1537 ret = split_huge_page(page); in try_to_split_thp_page()
1538 unlock_page(page); in try_to_split_thp_page()
1541 put_page(page); in try_to_split_thp_page()
1574 struct page *page = pfn_to_page(pfn); in mf_generic_kill_procs() local
1583 page = compound_head(page); in mf_generic_kill_procs()
1588 * lock_page(), but dax pages do not use the page lock. This in mf_generic_kill_procs()
1592 cookie = dax_lock_page(page); in mf_generic_kill_procs()
1596 if (hwpoison_filter(page)) { in mf_generic_kill_procs()
1615 * Use this flag as an indication that the dax page has been in mf_generic_kill_procs()
1618 SetPageHWPoison(page); in mf_generic_kill_procs()
1622 * different physical page at a given virtual address, so all in mf_generic_kill_procs()
1627 collect_procs(page, &to_kill, true); in mf_generic_kill_procs()
1629 unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); in mf_generic_kill_procs()
1631 dax_unlock_page(page, cookie); in mf_generic_kill_procs()
1648 struct page *page; in mf_dax_kill_procs() local
1654 page = NULL; in mf_dax_kill_procs()
1655 cookie = dax_lock_mapping_entry(mapping, index, &page); in mf_dax_kill_procs()
1658 if (!page) in mf_dax_kill_procs()
1661 SetPageHWPoison(page); in mf_dax_kill_procs()
1663 collect_procs_fsdax(page, mapping, index, &to_kill); in mf_dax_kill_procs()
1664 unmap_and_kill(&to_kill, page_to_pfn(page), mapping, in mf_dax_kill_procs()
1676 * Struct raw_hwp_page represents information about "raw error page",
1678 * SUBPAGE_INDEX_HWPOISON-th tail page.
1682 struct page *page; member
1685 static inline struct llist_head *raw_hwp_list_head(struct page *hpage) in raw_hwp_list_head()
1690 static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag) in __free_raw_hwp_pages()
1701 SetPageHWPoison(p->page); in __free_raw_hwp_pages()
1709 static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page) in hugetlb_set_page_hwpoison() argument
1727 if (p->page == page) in hugetlb_set_page_hwpoison()
1733 raw_hwp->page = page; in hugetlb_set_page_hwpoison()
1754 static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag) in free_raw_hwp_pages()
1773 void hugetlb_clear_page_hwpoison(struct page *hpage) in hugetlb_clear_page_hwpoison()
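
The raw_hwp_page fragments suggest a per-hugepage list remembering which raw subpages actually took errors: new entries are appended unless the subpage is already recorded, and tearing the list down can optionally push the poison flag back onto each raw page. A simplified single-threaded model; the kernel uses a lock-free llist, while this sketch uses a plain list and malloc:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_page { int pfn; bool hwpoison; };

struct raw_hwp_entry {
	struct raw_hwp_entry *next;
	struct toy_page *page;          /* raw subpage that took the error */
};

struct toy_hugepage {
	struct raw_hwp_entry *raw_hwp_list;
};

/* Record one more errored subpage; refuse duplicates. */
static int toy_set_page_hwpoison(struct toy_hugepage *h, struct toy_page *p)
{
	struct raw_hwp_entry *e;

	for (e = h->raw_hwp_list; e; e = e->next)
		if (e->page == p)
			return -1;      /* this raw page is already recorded */

	e = malloc(sizeof(*e));
	if (!e)
		return -1;
	e->page = p;
	e->next = h->raw_hwp_list;
	h->raw_hwp_list = e;
	return 0;
}

/* Drop the list; optionally move the poison marker back to each raw page. */
static unsigned long toy_free_raw_hwp_pages(struct toy_hugepage *h, bool move_flag)
{
	unsigned long count = 0;

	while (h->raw_hwp_list) {
		struct raw_hwp_entry *e = h->raw_hwp_list;

		h->raw_hwp_list = e->next;
		if (move_flag)
			e->page->hwpoison = true;
		free(e);
		count++;
	}
	return count;
}

int main(void)
{
	struct toy_hugepage huge = { NULL };
	struct toy_page sub = { .pfn = 42, .hwpoison = false };

	toy_set_page_hwpoison(&huge, &sub);
	toy_set_page_hwpoison(&huge, &sub);                 /* duplicate: ignored */
	printf("freed %lu entries\n", toy_free_raw_hwp_pages(&huge, true)); /* 1 */
	printf("raw page poisoned: %d\n", sub.hwpoison);                    /* 1 */
	return 0;
}
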
1793 struct page *page = pfn_to_page(pfn); in __get_huge_page_for_hwpoison() local
1794 struct page *head = compound_head(page); in __get_huge_page_for_hwpoison()
1795 int ret = 2; /* fallback to normal page handling */ in __get_huge_page_for_hwpoison()
1816 if (hugetlb_set_page_hwpoison(head, page)) { in __get_huge_page_for_hwpoison()
1837 struct page *p = pfn_to_page(pfn); in try_memory_failure_hugetlb()
1838 struct page *head; in try_memory_failure_hugetlb()
1844 if (res == 2) { /* fallback to normal page handling */ in try_memory_failure_hugetlb()
1910 static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag) in free_raw_hwp_pages()
1919 struct page *page = pfn_to_page(pfn); in memory_failure_dev_pagemap() local
1926 put_page(page); in memory_failure_dev_pagemap()
1957 * memory_failure - Handle memory failure of a page.
1958 * @pfn: Page Number of the corrupted page
1963 * of a page. It tries its best to recover, which includes
1979 struct page *p; in memory_failure()
1980 struct page *hpage; in memory_failure()
1988 panic("Memory failure on page %lx", pfn); in memory_failure()
2033 * 1) it's a free page, and therefore in safe hand: in memory_failure()
2035 * 2) it's part of a non-compound high order page. in memory_failure()
2037 * R/W the page; let's pray that the page has been in memory_failure()
2039 * In fact it's dangerous to directly bump up page count from 0, in memory_failure()
2082 * get_hwpoison_page() since they handle either free page in memory_failure()
2083 * or unhandlable page. The refcount is bumped iff the in memory_failure()
2084 * page is a valid handlable page. in memory_failure()
2101 * walked by the page reclaim code, however that's not a big loss. in memory_failure()
2108 * We're only intended to deal with the non-Compound page here. in memory_failure()
2109 * However, the page could have changed compound pages due to in memory_failure()
2111 * handle the page next round. in memory_failure()
2128 * We use page flags to determine what action should be taken, but in memory_failure()
2130 * example is an mlocked page, where PG_mlocked is cleared by in memory_failure()
2131 * page_remove_rmap() in try_to_unmap_one(). So to determine page status in memory_failure()
2132 * correctly, we save a copy of the page flags at this time. in memory_failure()
2145 * __munlock_pagevec may clear a writeback page's LRU flag without in memory_failure()
2146 * page_lock. We need to wait for writeback completion for this page or it in memory_failure()
2160 * Abort on fail: __filemap_remove_folio() assumes unmapped page. in memory_failure()
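
Reading the memory_failure() fragments in order suggests roughly this sequence: take a reference, split a THP if needed, snapshot the page flags before unmapping (unmap can clear bits such as PG_mlocked), wait for writeback, unmap all user mappings, then classify the page state and run its handler. The skeleton below only names those stages; it is not the real control flow, which has many more branches for hugetlb, device pages and races:

#include <stdbool.h>
#include <stdio.h>

/* Stubbed steps: each just reports what the real code would do. */
static bool grab_refcount(void)  { puts("get_hwpoison_page()"); return true; }
static bool split_if_thp(void)   { puts("try_to_split_thp_page()"); return true; }
static void save_flags(void)     { puts("snapshot page flags before unmap"); }
static void wait_writeback(void) { puts("wait for writeback to finish"); }
static bool unmap_users(void)    { puts("hwpoison_user_mappings()"); return true; }
static int  act_on_state(void)   { puts("identify_page_state() + action"); return 0; }

/* Rough ordering of the memory_failure() path as suggested by the
 * fragments above; error handling and locking are elided. */
static int toy_memory_failure(void)
{
	if (!grab_refcount())
		return -1;      /* free page or unhandlable type */
	if (!split_if_thp())
		return -1;      /* retry as a huge page next round */
	save_flags();           /* unmap may clear e.g. the mlocked bit */
	wait_writeback();       /* LRU flag games vs. writeback pages */
	if (!unmap_users())
		return -1;      /* __filemap_remove_folio() needs it unmapped */
	return act_on_state();
}

int main(void)
{
	return toy_memory_failure();
}
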
2207 * memory_failure_queue - Schedule handling memory failure of a page.
2208 * @pfn: Page Number of the corrupted page
2212 * when it detects hardware memory corruption of a page. It schedules
2213 * the recovering of error page, including dropping pages, killing
2302 * unpoison_memory - Unpoison a previously poisoned page
2303 * @pfn: Page number of the to be unpoisoned page
2305 * Software-unpoison a page that has been poisoned by
2315 struct page *page; in unpoison_memory() local
2316 struct page *p; in unpoison_memory()
2327 page = compound_head(p); in unpoison_memory()
2339 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n", in unpoison_memory()
2344 if (page_count(page) > 1) { in unpoison_memory()
2345 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n", in unpoison_memory()
2350 if (page_mapped(page)) { in unpoison_memory()
2351 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n", in unpoison_memory()
2356 if (page_mapping(page)) { in unpoison_memory()
2357 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n", in unpoison_memory()
2362 if (PageSlab(page) || PageTable(page) || PageReserved(page)) in unpoison_memory()
2368 count = free_raw_hwp_pages(page, false); in unpoison_memory()
2374 ret = TestClearPageHWPoison(page) ? 0 : -EBUSY; in unpoison_memory()
2379 unpoison_pr_info("Unpoison: failed to grab page %#lx\n", in unpoison_memory()
2383 count = free_raw_hwp_pages(page, false); in unpoison_memory()
2386 put_page(page); in unpoison_memory()
2392 put_page(page); in unpoison_memory()
2394 put_page(page); in unpoison_memory()
2403 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n", in unpoison_memory()
2410 static bool isolate_page(struct page *page, struct list_head *pagelist) in isolate_page() argument
2414 if (PageHuge(page)) { in isolate_page()
2415 isolated = !isolate_hugetlb(page, pagelist); in isolate_page()
2417 bool lru = !__PageMovable(page); in isolate_page()
2420 isolated = !isolate_lru_page(page); in isolate_page()
2422 isolated = !isolate_movable_page(page, in isolate_page()
2426 list_add(&page->lru, pagelist); in isolate_page()
2428 inc_node_page_state(page, NR_ISOLATED_ANON + in isolate_page()
2429 page_is_file_lru(page)); in isolate_page()
2434 * If we succeed in isolating the page, we have grabbed another refcount on in isolate_page()
2435 * the page, so we can safely drop the one we got from get_any_page(). in isolate_page()
2436 * If we failed to isolate the page, it means that we cannot go further in isolate_page()
2440 put_page(page); in isolate_page()
2446 * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
2447 * If the page is mapped, it migrates the contents over.
2449 static int soft_offline_in_use_page(struct page *page) in soft_offline_in_use_page() argument
2452 unsigned long pfn = page_to_pfn(page); in soft_offline_in_use_page()
2453 struct page *hpage = compound_head(page); in soft_offline_in_use_page()
2454 char const *msg_page[] = {"page", "hugepage"}; in soft_offline_in_use_page()
2455 bool huge = PageHuge(page); in soft_offline_in_use_page()
2463 if (try_to_split_thp_page(page)) { in soft_offline_in_use_page()
2467 hpage = page; in soft_offline_in_use_page()
2470 lock_page(page); in soft_offline_in_use_page()
2471 if (!PageHuge(page)) in soft_offline_in_use_page()
2472 wait_on_page_writeback(page); in soft_offline_in_use_page()
2473 if (PageHWPoison(page)) { in soft_offline_in_use_page()
2474 unlock_page(page); in soft_offline_in_use_page()
2475 put_page(page); in soft_offline_in_use_page()
2476 pr_info("soft offline: %#lx page already poisoned\n", pfn); in soft_offline_in_use_page()
2480 if (!PageHuge(page) && PageLRU(page) && !PageSwapCache(page)) in soft_offline_in_use_page()
2483 * non dirty unmapped page cache pages. in soft_offline_in_use_page()
2485 ret = invalidate_inode_page(page); in soft_offline_in_use_page()
2486 unlock_page(page); in soft_offline_in_use_page()
2490 page_handle_poison(page, false, true); in soft_offline_in_use_page()
2500 if (!page_handle_poison(page, huge, release)) in soft_offline_in_use_page()
2507 pfn, msg_page[huge], ret, &page->flags); in soft_offline_in_use_page()
2512 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n", in soft_offline_in_use_page()
2513 pfn, msg_page[huge], page_count(page), &page->flags); in soft_offline_in_use_page()
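
soft_offline_in_use_page() reads as a two-path operation: a clean, unmapped page-cache page can simply be invalidated, while anything else must be migrated away before the pfn is marked poisoned. A compact decision sketch under those assumptions, with toy predicates and no locking:

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool huge, lru, dirty, swapcache, hwpoison;
};

static bool invalidate_page(struct toy_page *p) { return !p->dirty; }
static bool migrate_page(struct toy_page *p)    { (void)p; return true; }

static int toy_soft_offline_in_use(struct toy_page *p)
{
	if (p->hwpoison) {
		puts("already poisoned: nothing to do");
		return 0;
	}

	/* Cheap path: drop a clean, unmapped page-cache page outright. */
	if (!p->huge && p->lru && !p->swapcache && invalidate_page(p)) {
		puts("invalidated clean page cache page");
		p->hwpoison = true;
		return 0;
	}

	/* Otherwise move the contents elsewhere before poisoning the pfn. */
	if (migrate_page(p)) {
		puts("migrated, marking source pfn poisoned");
		p->hwpoison = true;
		return 0;
	}

	puts("migration failed, soft offline aborted");
	return -1;
}

int main(void)
{
	struct toy_page clean = { .lru = true };
	struct toy_page dirty = { .lru = true, .dirty = true };

	toy_soft_offline_in_use(&clean);
	toy_soft_offline_in_use(&dirty);
	return 0;
}
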
2519 static void put_ref_page(struct page *page) in put_ref_page() argument
2521 if (page) in put_ref_page()
2522 put_page(page); in put_ref_page()
2526 * soft_offline_page - Soft offline a page.
2534 * Soft offline a page, by migration or invalidation,
2536 * a page is not corrupted yet (so it's still valid to access),
2553 struct page *page, *ref_page = NULL; in soft_offline_page() local
2563 page = pfn_to_online_page(pfn); in soft_offline_page()
2564 if (!page) { in soft_offline_page()
2571 if (PageHWPoison(page)) { in soft_offline_page()
2572 pr_info("%s: %#lx page already poisoned\n", __func__, pfn); in soft_offline_page()
2580 ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE); in soft_offline_page()
2583 if (hwpoison_filter(page)) { in soft_offline_page()
2585 put_page(page); in soft_offline_page()
2592 ret = soft_offline_in_use_page(page); in soft_offline_page()
2594 if (!page_handle_poison(page, true, false) && try_again) { in soft_offline_page()
2606 void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) in clear_hwpoisoned_pages()