Lines matching full:page (each matched line is shown with its line number in the source file)
46 /* How many pages do we try to swap or page in/out together? */
80 static void __page_cache_release(struct page *page) in __page_cache_release() argument
82 if (PageLRU(page)) { in __page_cache_release()
86 lruvec = lock_page_lruvec_irqsave(page, &flags); in __page_cache_release()
87 del_page_from_lru_list(page, lruvec); in __page_cache_release()
88 __clear_page_lru_flags(page); in __page_cache_release()
91 __ClearPageWaiters(page); in __page_cache_release()
94 static void __put_single_page(struct page *page) in __put_single_page() argument
96 __page_cache_release(page); in __put_single_page()
97 mem_cgroup_uncharge(page); in __put_single_page()
98 free_unref_page(page, 0); in __put_single_page()
101 static void __put_compound_page(struct page *page) in __put_compound_page() argument
105 * hugetlb. This is because a hugetlb page never has PageLRU set in __put_compound_page()
109 if (!PageHuge(page)) in __put_compound_page()
110 __page_cache_release(page); in __put_compound_page()
111 destroy_compound_page(page); in __put_compound_page()
114 void __put_page(struct page *page) in __put_page() argument
116 if (is_zone_device_page(page)) { in __put_page()
117 put_dev_pagemap(page->pgmap); in __put_page()
120 * The page belongs to the device that created pgmap. Do in __put_page()
121 * not return it to page allocator. in __put_page()
126 if (unlikely(PageCompound(page))) in __put_page()
127 __put_compound_page(page); in __put_page()
129 __put_single_page(page); in __put_page()
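Pieced together from the matched lines above, the final-put path dispatches on the kind of page being released. A rough paraphrase of __put_page() (not the verbatim source; the else branch and the early return are reassembled from the fragments shown):

void __put_page(struct page *page)
{
        if (is_zone_device_page(page)) {
                /*
                 * The page belongs to the device that created pgmap; drop
                 * the pagemap reference rather than returning the page to
                 * the page allocator.
                 */
                put_dev_pagemap(page->pgmap);
                return;
        }

        if (unlikely(PageCompound(page)))
                __put_compound_page(page);      /* THP/hugetlb teardown */
        else
                __put_single_page(page);        /* LRU removal, uncharge, free */
}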
135 * @pages: list of pages threaded on page->lru
137 * Release a list of pages which are strung together on page->lru. Currently
143 struct page *victim; in put_pages_list()
162 * were pinned, returns -errno. Each page returned must be released
166 struct page **pages) in get_kernel_pages()
183 void (*move_fn)(struct page *page, struct lruvec *lruvec)) in pagevec_lru_move_fn() argument
190 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn() local
192 /* block memcg migration during page moving between lru */ in pagevec_lru_move_fn()
193 if (!TestClearPageLRU(page)) in pagevec_lru_move_fn()
196 lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags); in pagevec_lru_move_fn()
197 (*move_fn)(page, lruvec); in pagevec_lru_move_fn()
199 SetPageLRU(page); in pagevec_lru_move_fn()
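The pagevec_lru_move_fn() lines above show the pattern shared by every move helper that follows (pagevec_move_tail_fn, __activate_page, the deactivate and lazyfree callbacks): clear PageLRU so concurrent isolation skips the page, relock the correct lruvec only when it changes, run the callback, then restore PageLRU. A sketch of the whole loop; the unlock, release_pages() and pagevec_reinit() tail does not appear in the matches and is filled in as an assumption:

static void pagevec_lru_move_fn(struct pagevec *pvec,
        void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
        int i;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                /* block memcg migration while the page moves between lrus */
                if (!TestClearPageLRU(page))
                        continue;

                /* only re-take the lru lock when the lruvec changes */
                lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
                (*move_fn)(page, lruvec);

                SetPageLRU(page);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        /* drop the references taken by the queueing side */
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
}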
207 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec) in pagevec_move_tail_fn() argument
209 if (!PageUnevictable(page)) { in pagevec_move_tail_fn()
210 del_page_from_lru_list(page, lruvec); in pagevec_move_tail_fn()
211 ClearPageActive(page); in pagevec_move_tail_fn()
212 add_page_to_lru_list_tail(page, lruvec); in pagevec_move_tail_fn()
213 __count_vm_events(PGROTATED, thp_nr_pages(page)); in pagevec_move_tail_fn()
218 static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page) in pagevec_add_and_need_flush() argument
222 if (!pagevec_add(pvec, page) || PageCompound(page) || in pagevec_add_and_need_flush()
230 * Writeback is about to end against a page which has been marked for immediate
236 void rotate_reclaimable_page(struct page *page) in rotate_reclaimable_page() argument
238 if (!PageLocked(page) && !PageDirty(page) && in rotate_reclaimable_page()
239 !PageUnevictable(page) && PageLRU(page)) { in rotate_reclaimable_page()
243 get_page(page); in rotate_reclaimable_page()
246 if (pagevec_add_and_need_flush(pvec, page)) in rotate_reclaimable_page()
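rotate_reclaimable_page() is the queueing side of that machinery: pin the page, stash it in a per-CPU pagevec, and only drain (taking the LRU lock) when pagevec_add_and_need_flush() says the batch is full or the page is compound. A sketch of that shape; the per-CPU container name (lru_rotate) and its local lock are assumptions from this kernel area, not lines from the match:

void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                get_page(page);         /* hold the page until the batch drains */
                local_lock_irqsave(&lru_rotate.lock, flags);    /* assumed per-CPU lock */
                pvec = this_cpu_ptr(&lru_rotate.pvec);          /* assumed per-CPU pagevec */
                if (pagevec_add_and_need_flush(pvec, page))
                        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
}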
260 * 2) From a pre-LRU page during refault (which also holds the in lru_note_cost()
261 * rcu lock, so would be safe even if the page was on the LRU in lru_note_cost()
292 void lru_note_cost_page(struct page *page) in lru_note_cost_page() argument
294 lru_note_cost(mem_cgroup_page_lruvec(page), in lru_note_cost_page()
295 page_is_file_lru(page), thp_nr_pages(page)); in lru_note_cost_page()
298 static void __activate_page(struct page *page, struct lruvec *lruvec) in __activate_page() argument
300 if (!PageActive(page) && !PageUnevictable(page)) { in __activate_page()
301 int nr_pages = thp_nr_pages(page); in __activate_page()
303 del_page_from_lru_list(page, lruvec); in __activate_page()
304 SetPageActive(page); in __activate_page()
305 add_page_to_lru_list(page, lruvec); in __activate_page()
306 trace_mm_lru_activate(page); in __activate_page()
328 static void activate_page(struct page *page) in activate_page() argument
330 page = compound_head(page); in activate_page()
331 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in activate_page()
336 get_page(page); in activate_page()
337 if (pagevec_add_and_need_flush(pvec, page)) in activate_page()
348 static void activate_page(struct page *page) in activate_page() argument
352 page = compound_head(page); in activate_page()
353 if (TestClearPageLRU(page)) { in activate_page()
354 lruvec = lock_page_lruvec_irq(page); in activate_page()
355 __activate_page(page, lruvec); in activate_page()
357 SetPageLRU(page); in activate_page()
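The second activate_page() variant above is the !CONFIG_SMP build, where there is no per-CPU batch to flush: it activates the page directly under the lruvec lock using the same TestClearPageLRU handshake. Roughly, with the unlock call (which does not contain the search term) filled in as an assumption:

static void activate_page(struct page *page)
{
        struct lruvec *lruvec;

        page = compound_head(page);
        if (TestClearPageLRU(page)) {
                lruvec = lock_page_lruvec_irq(page);
                __activate_page(page, lruvec);
                unlock_page_lruvec_irq(lruvec); /* assumed: pairs with the lock above */
                SetPageLRU(page);
        }
}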
362 static void __lru_cache_activate_page(struct page *page) in __lru_cache_activate_page() argument
371 * Search backwards on the optimistic assumption that the page being in __lru_cache_activate_page()
373 * the local pagevec is examined as a !PageLRU page could be in the in __lru_cache_activate_page()
376 * a remote pagevec's page PageActive potentially hits a race where in __lru_cache_activate_page()
377 * a page is marked PageActive just after it is added to the inactive in __lru_cache_activate_page()
381 struct page *pagevec_page = pvec->pages[i]; in __lru_cache_activate_page()
383 if (pagevec_page == page) { in __lru_cache_activate_page()
384 SetPageActive(page); in __lru_cache_activate_page()
393 * Mark a page as having seen activity.
399 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
400 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
402 void mark_page_accessed(struct page *page) in mark_page_accessed() argument
404 page = compound_head(page); in mark_page_accessed()
406 if (!PageReferenced(page)) { in mark_page_accessed()
407 SetPageReferenced(page); in mark_page_accessed()
408 } else if (PageUnevictable(page)) { in mark_page_accessed()
412 * evictable page accessed has no effect. in mark_page_accessed()
414 } else if (!PageActive(page)) { in mark_page_accessed()
416 * If the page is on the LRU, queue it for activation via in mark_page_accessed()
417 * lru_pvecs.activate_page. Otherwise, assume the page is on a in mark_page_accessed()
421 if (PageLRU(page)) in mark_page_accessed()
422 activate_page(page); in mark_page_accessed()
424 __lru_cache_activate_page(page); in mark_page_accessed()
425 ClearPageReferenced(page); in mark_page_accessed()
426 workingset_activation(page); in mark_page_accessed()
428 if (page_is_idle(page)) in mark_page_accessed()
429 clear_page_idle(page); in mark_page_accessed()
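Read together, the mark_page_accessed() lines form a small promotion ladder: the first access sets PageReferenced, a repeated access promotes the page to the active list, and idle-page tracking is cleared either way. A condensed restatement of the branch structure (the comments paraphrase the ones elided from the match):

void mark_page_accessed(struct page *page)
{
        page = compound_head(page);

        if (!PageReferenced(page)) {
                /* first access since the last check: just record it */
                SetPageReferenced(page);
        } else if (PageUnevictable(page)) {
                /* the unevictable list is never rotated; nothing to do */
        } else if (!PageActive(page)) {
                /* repeated access: promote to the active list */
                if (PageLRU(page))
                        activate_page(page);
                else
                        __lru_cache_activate_page(page);  /* still queued in a pagevec */
                ClearPageReferenced(page);
                workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
}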
434 * lru_cache_add - add a page to a page list
435 * @page: the page to be added to the LRU.
437 * Queue the page for addition to the LRU via pagevec. The decision on whether
438 * to add the page to the [in]active [file|anon] list is deferred until the
440 * have the page added to the active list using mark_page_accessed().
442 void lru_cache_add(struct page *page) in lru_cache_add() argument
446 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); in lru_cache_add()
447 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add()
449 get_page(page); in lru_cache_add()
452 if (pagevec_add_and_need_flush(pvec, page)) in lru_cache_add()
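lru_cache_add() follows the same batching idiom: take a reference, append the page to the per-CPU lru_add pagevec, and drain it with __pagevec_lru_add() once the batch must be flushed. The per-CPU container (lru_pvecs.lru_add) and its local_lock below are assumptions based on this area of the kernel rather than matched lines:

void lru_cache_add(struct page *page)
{
        struct pagevec *pvec;

        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
        VM_BUG_ON_PAGE(PageLRU(page), page);

        get_page(page);                         /* held until the pagevec drains */
        local_lock(&lru_pvecs.lock);            /* assumed per-CPU lock */
        pvec = this_cpu_ptr(&lru_pvecs.lru_add);
        if (pagevec_add_and_need_flush(pvec, page))
                __pagevec_lru_add(pvec);
        local_unlock(&lru_pvecs.lock);
}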
460 * @page: the page to be added to LRU
461 * @vma: vma in which page is mapped for determining reclaimability
463 * Place @page on the inactive or unevictable LRU list, depending on its
466 void lru_cache_add_inactive_or_unevictable(struct page *page, in lru_cache_add_inactive_or_unevictable() argument
471 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add_inactive_or_unevictable()
474 if (unlikely(unevictable) && !TestSetPageMlocked(page)) { in lru_cache_add_inactive_or_unevictable()
475 int nr_pages = thp_nr_pages(page); in lru_cache_add_inactive_or_unevictable()
481 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); in lru_cache_add_inactive_or_unevictable()
484 lru_cache_add(page); in lru_cache_add_inactive_or_unevictable()
488 * If the page can not be invalidated, it is moved to the
492 * effective than the single-page writeout from reclaim.
494 * If the page isn't page_mapped and dirty/writeback, the page
497 * 1. active, mapped page -> none
498 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
499 * 3. inactive, mapped page -> none
500 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
504 * In 4, the page is moved to the head of the inactive list because the VM expects the page to
506 * be written out by flusher threads, which is much more effective than the single-page writeout from reclaim.
508 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec) in lru_deactivate_file_fn() argument
510 bool active = PageActive(page); in lru_deactivate_file_fn()
511 int nr_pages = thp_nr_pages(page); in lru_deactivate_file_fn()
513 if (PageUnevictable(page)) in lru_deactivate_file_fn()
516 /* Some processes are using the page */ in lru_deactivate_file_fn()
517 if (page_mapped(page)) in lru_deactivate_file_fn()
520 del_page_from_lru_list(page, lruvec); in lru_deactivate_file_fn()
521 ClearPageActive(page); in lru_deactivate_file_fn()
522 ClearPageReferenced(page); in lru_deactivate_file_fn()
524 if (PageWriteback(page) || PageDirty(page)) { in lru_deactivate_file_fn()
530 add_page_to_lru_list(page, lruvec); in lru_deactivate_file_fn()
531 SetPageReclaim(page); in lru_deactivate_file_fn()
534 * The page's writeback has already completed while it sat in the pagevec; in lru_deactivate_file_fn()
535 * move the page to the tail of the inactive list. in lru_deactivate_file_fn()
537 add_page_to_lru_list_tail(page, lruvec); in lru_deactivate_file_fn()
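A condensed rendering of lru_deactivate_file_fn() that maps the numbered cases from the comment block onto the code; memcg event accounting is trimmed and the comments are paraphrased:

static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
        bool active = PageActive(page);
        int nr_pages = thp_nr_pages(page);

        if (PageUnevictable(page))
                return;
        if (page_mapped(page))                  /* cases 1 and 3: still in use */
                return;

        del_page_from_lru_list(page, lruvec);
        ClearPageActive(page);
        ClearPageReferenced(page);

        if (PageWriteback(page) || PageDirty(page)) {
                /* cases 2 and 4: inactive head, reclaimed as soon as I/O completes */
                add_page_to_lru_list(page, lruvec);
                SetPageReclaim(page);
        } else {
                /* clean page: straight to the inactive tail */
                add_page_to_lru_list_tail(page, lruvec);
                __count_vm_events(PGROTATED, nr_pages);
        }

        if (active)
                __count_vm_events(PGDEACTIVATE, nr_pages);
}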
548 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec) in lru_deactivate_fn() argument
550 if (PageActive(page) && !PageUnevictable(page)) { in lru_deactivate_fn()
551 int nr_pages = thp_nr_pages(page); in lru_deactivate_fn()
553 del_page_from_lru_list(page, lruvec); in lru_deactivate_fn()
554 ClearPageActive(page); in lru_deactivate_fn()
555 ClearPageReferenced(page); in lru_deactivate_fn()
556 add_page_to_lru_list(page, lruvec); in lru_deactivate_fn()
564 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec) in lru_lazyfree_fn() argument
566 if (PageAnon(page) && PageSwapBacked(page) && in lru_lazyfree_fn()
567 !PageSwapCache(page) && !PageUnevictable(page)) { in lru_lazyfree_fn()
568 int nr_pages = thp_nr_pages(page); in lru_lazyfree_fn()
570 del_page_from_lru_list(page, lruvec); in lru_lazyfree_fn()
571 ClearPageActive(page); in lru_lazyfree_fn()
572 ClearPageReferenced(page); in lru_lazyfree_fn()
578 ClearPageSwapBacked(page); in lru_lazyfree_fn()
579 add_page_to_lru_list(page, lruvec); in lru_lazyfree_fn()
626 * deactivate_file_page - forcefully deactivate a file page
627 * @page: page to deactivate
629 * This function hints the VM that @page is a good reclaim candidate,
630 * for example if its invalidation fails due to the page being dirty
633 void deactivate_file_page(struct page *page) in deactivate_file_page() argument
636 * In a workload with many unevictable pages (such as one using mprotect), in deactivate_file_page()
637 * deactivating unevictable pages to accelerate reclaim is pointless. in deactivate_file_page()
639 if (PageUnevictable(page)) in deactivate_file_page()
642 if (likely(get_page_unless_zero(page))) { in deactivate_file_page()
648 if (pagevec_add_and_need_flush(pvec, page)) in deactivate_file_page()
655 * deactivate_page - deactivate a page
656 * @page: page to deactivate
658 * deactivate_page() moves @page to the inactive list if @page was on the active
659 * list and was not an unevictable page. This is done to accelerate the reclaim
660 * of @page.
662 void deactivate_page(struct page *page) in deactivate_page() argument
664 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { in deactivate_page()
669 get_page(page); in deactivate_page()
670 if (pagevec_add_and_need_flush(pvec, page)) in deactivate_page()
677 * mark_page_lazyfree - make an anon page lazyfree
678 * @page: page to deactivate
680 * mark_page_lazyfree() moves @page to the inactive file list.
681 * This is done to accelerate the reclaim of @page.
683 void mark_page_lazyfree(struct page *page) in mark_page_lazyfree() argument
685 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in mark_page_lazyfree()
686 !PageSwapCache(page) && !PageUnevictable(page)) { in mark_page_lazyfree()
691 get_page(page); in mark_page_lazyfree()
692 if (pagevec_add_and_need_flush(pvec, page)) in mark_page_lazyfree()
799 * below which drains the page vectors. in __lru_add_drain_all()
884 * fell to zero, remove the page from the LRU and free it.
886 void release_pages(struct page **pages, int nr) in release_pages()
895 struct page *page = pages[i]; in release_pages() local
907 page = compound_head(page); in release_pages()
908 if (is_huge_zero_page(page)) in release_pages()
911 if (is_zone_device_page(page)) { in release_pages()
922 if (page_is_devmap_managed(page)) { in release_pages()
923 put_devmap_managed_page(page); in release_pages()
926 if (put_page_testzero(page)) in release_pages()
927 put_dev_pagemap(page->pgmap); in release_pages()
931 if (!put_page_testzero(page)) in release_pages()
934 if (PageCompound(page)) { in release_pages()
939 __put_compound_page(page); in release_pages()
943 if (PageLRU(page)) { in release_pages()
946 lruvec = relock_page_lruvec_irqsave(page, lruvec, in release_pages()
951 del_page_from_lru_list(page, lruvec); in release_pages()
952 __clear_page_lru_flags(page); in release_pages()
955 __ClearPageWaiters(page); in release_pages()
957 list_add(&page->lru, &pages_to_free); in release_pages()
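release_pages() is the batched counterpart of put_page(): it periodically drops and re-takes the lruvec lock so one long run cannot hold off interrupts, skips pages that still have other references, and collects the freeable ones on a local list for a single bulk free at the end. A trimmed sketch; the SWAP_CLUSTER_MAX batch limit, the zone-device shortcut (elided here) and the final bulk-free calls are filled in as assumptions around the matched lines:

void release_pages(struct page **pages, int nr)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct lruvec *lruvec = NULL;
        unsigned long flags;
        unsigned int lock_batch = 0;

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                /* cap how long the irq-safe lruvec lock is held in one stretch */
                if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
                        unlock_page_lruvec_irqrestore(lruvec, flags);
                        lruvec = NULL;
                }

                page = compound_head(page);
                if (is_huge_zero_page(page))
                        continue;
                /* (zone-device / devmap handling trimmed from this sketch) */

                if (!put_page_testzero(page))
                        continue;               /* someone else still holds a reference */

                if (PageCompound(page)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        __put_compound_page(page);
                        continue;
                }

                if (PageLRU(page)) {
                        struct lruvec *prev_lruvec = lruvec;

                        lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
                        if (prev_lruvec != lruvec)
                                lock_batch = 0; /* new lock taken, restart the batch count */

                        del_page_from_lru_list(page, lruvec);
                        __clear_page_lru_flags(page);
                }

                __ClearPageWaiters(page);
                list_add(&page->lru, &pages_to_free);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);

        /* hand the whole batch back to the allocator in one go */
        mem_cgroup_uncharge_list(&pages_to_free);
        free_unref_page_list(&pages_to_free);
}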
971 * cache-warm and we want to give them back to the page allocator ASAP.
988 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec) in __pagevec_lru_add_fn() argument
990 int was_unevictable = TestClearPageUnevictable(page); in __pagevec_lru_add_fn()
991 int nr_pages = thp_nr_pages(page); in __pagevec_lru_add_fn()
993 VM_BUG_ON_PAGE(PageLRU(page), page); in __pagevec_lru_add_fn()
996 * Page becomes evictable in two ways: in __pagevec_lru_add_fn()
998 * 2) Before acquiring LRU lock to put the page to correct LRU and then in __pagevec_lru_add_fn()
1015 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU in __pagevec_lru_add_fn()
1017 * the isolation of the page whose Mlocked bit is cleared (#0 is also in __pagevec_lru_add_fn()
1018 * looking at the same page) and the evictable page will be stranded in __pagevec_lru_add_fn()
1021 SetPageLRU(page); in __pagevec_lru_add_fn()
1024 if (page_evictable(page)) { in __pagevec_lru_add_fn()
1028 ClearPageActive(page); in __pagevec_lru_add_fn()
1029 SetPageUnevictable(page); in __pagevec_lru_add_fn()
1034 add_page_to_lru_list(page, lruvec); in __pagevec_lru_add_fn()
1035 trace_mm_lru_insertion(page); in __pagevec_lru_add_fn()
1049 struct page *page = pvec->pages[i]; in __pagevec_lru_add() local
1051 lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags); in __pagevec_lru_add()
1052 __pagevec_lru_add_fn(page, lruvec); in __pagevec_lru_add()
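When the lru_add pagevec drains, __pagevec_lru_add_fn() decides each page's final placement. The interesting part is that the evictability check is made only after SetPageLRU, with a barrier in between, so a racing munlock cannot strand an evictable page on the unevictable list (the point of the long comment fragment above). Condensed, with the barrier and the UNEVICTABLE_* counters filled in as assumptions and the comment paraphrased:

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
        int was_unevictable = TestClearPageUnevictable(page);
        int nr_pages = thp_nr_pages(page);

        VM_BUG_ON_PAGE(PageLRU(page), page);

        SetPageLRU(page);
        /*
         * Order SetPageLRU against the evictability check below so that a
         * page whose Mlocked bit is being cleared concurrently ends up on
         * an evictable list (paraphrase of the elided comment).
         */
        smp_mb__after_atomic();

        if (page_evictable(page)) {
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                ClearPageActive(page);
                SetPageUnevictable(page);
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }

        add_page_to_lru_list(page, lruvec);
        trace_mm_lru_insertion(page);
}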
1067 * passed on to page-only pagevec operations.
1074 struct page *page = pvec->pages[i]; in pagevec_remove_exceptionals() local
1075 if (!xa_is_value(page)) in pagevec_remove_exceptionals()
1076 pvec->pages[j++] = page; in pagevec_remove_exceptionals()
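pagevec_remove_exceptionals() is a simple in-place compaction: xarray value entries (shadow or swap entries returned by lookups) are squeezed out so the pagevec can be handed to page-only operations. The helper is short; roughly, with the final count update assumed since that line does not contain the search term:

void pagevec_remove_exceptionals(struct pagevec *pvec)
{
        unsigned int i, j;

        for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (!xa_is_value(page))         /* keep real pages only */
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;                           /* assumed: shrink to the kept entries */
}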
1085 * @start: The starting page index
1086 * @end: The final page index
1095 * also update @start to index the next page for the traversal.
1139 void put_devmap_managed_page(struct page *page) in put_devmap_managed_page() argument
1143 if (WARN_ON_ONCE(!page_is_devmap_managed(page))) in put_devmap_managed_page()
1146 count = page_ref_dec_return(page); in put_devmap_managed_page()
1149 * devmap page refcounts are 1-based, rather than 0-based: if in put_devmap_managed_page()
1150 * refcount is 1, then the page is free and the refcount is in put_devmap_managed_page()
1151 * stable because nobody holds a reference on the page. in put_devmap_managed_page()
1154 free_devmap_managed_page(page); in put_devmap_managed_page()
1156 __put_page(page); in put_devmap_managed_page()
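put_devmap_managed_page() is where the 1-based refcount convention from the comment is applied: a result of 1 means the last real user is gone and the device page is freed through its pgmap owner, while 0 falls back to the normal __put_page() path. Reconstructed around the matched lines; the two-way branch is the assumption:

void put_devmap_managed_page(struct page *page)
{
        int count;

        if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
                return;

        count = page_ref_dec_return(page);

        /* devmap refcounts are 1-based: a remaining count of 1 means "free" */
        if (count == 1)
                free_devmap_managed_page(page);
        else if (!count)
                __put_page(page);
}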