Lines matching refs: page (the functions below are from the kernel's mm/swap.c)
59 static void __page_cache_release(struct page *page) in __page_cache_release() argument
61 if (PageLRU(page)) { in __page_cache_release()
62 struct zone *zone = page_zone(page); in __page_cache_release()
67 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); in __page_cache_release()
68 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
69 __ClearPageLRU(page); in __page_cache_release()
70 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
73 __ClearPageWaiters(page); in __page_cache_release()
74 mem_cgroup_uncharge(page); in __page_cache_release()
77 static void __put_single_page(struct page *page) in __put_single_page() argument
79 __page_cache_release(page); in __put_single_page()
80 free_unref_page(page); in __put_single_page()
83 static void __put_compound_page(struct page *page) in __put_compound_page() argument
93 if (!PageHuge(page)) in __put_compound_page()
94 __page_cache_release(page); in __put_compound_page()
95 dtor = get_compound_page_dtor(page); in __put_compound_page()
96 (*dtor)(page); in __put_compound_page()
99 void __put_page(struct page *page) in __put_page() argument
101 if (is_zone_device_page(page)) { in __put_page()
102 put_dev_pagemap(page->pgmap); in __put_page()
111 if (unlikely(PageCompound(page))) in __put_page()
112 __put_compound_page(page); in __put_page()
114 __put_single_page(page); in __put_page()
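
The group above is the final-release path: put_page() callers end up in __put_page() once the last reference is gone; zone-device pages just drop their pgmap reference, compound pages are handed to their compound destructor in __put_compound_page(), and ordinary pages go through __put_single_page(), which clears LRU state and then frees the page. The standalone C sketch below models only that dispatch; every name in it (my_page, my_put_page, ...) is invented for illustration and none of it is kernel code.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct page: a refcount, a compound flag,
       and a destructor pointer playing the role of the compound dtor. */
    struct my_page {
        int refcount;
        bool compound;
        void (*dtor)(struct my_page *);
    };

    static void free_single(struct my_page *page)
    {
        /* analogue of __put_single_page(): drop LRU state, then free */
        printf("freeing order-0 page\n");
        free(page);
    }

    static void free_compound(struct my_page *page)
    {
        /* analogue of __put_compound_page(): call the registered dtor */
        printf("freeing compound page via its destructor\n");
        page->dtor(page);
    }

    /* analogue of __put_page(): only the caller dropping the last
       reference does any real work, and compound pages take a
       different path from order-0 pages */
    static void my_put_page(struct my_page *page)
    {
        if (--page->refcount > 0)
            return;
        if (page->compound)
            free_compound(page);
        else
            free_single(page);
    }

    int main(void)
    {
        struct my_page *page = malloc(sizeof(*page));

        page->refcount = 2;
        page->compound = false;
        page->dtor = NULL;

        my_put_page(page);  /* still referenced: nothing happens */
        my_put_page(page);  /* last reference: the page is freed */
        return 0;
    }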
128 struct page *victim; in put_pages_list()
130 victim = list_entry(pages->prev, struct page, lru); in put_pages_list()
151 struct page **pages) in get_kernel_pages()
178 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page()
190 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), in pagevec_lru_move_fn() argument
199 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn() local
200 struct pglist_data *pagepgdat = page_pgdat(page); in pagevec_lru_move_fn()
209 lruvec = mem_cgroup_page_lruvec(page, pgdat); in pagevec_lru_move_fn()
210 (*move_fn)(page, lruvec, arg); in pagevec_lru_move_fn()
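
pagevec_lru_move_fn() applies a callback to every page in a pagevec and, because pages from different nodes can sit in one batch, re-takes a node's LRU lock only when the node changes between consecutive pages. The following standalone sketch models that lock-amortization idea with invented names (struct node, batch_move, ...); it is an illustration of the pattern, not the kernel implementation.

    #include <stdio.h>

    struct node { int id; };                /* stand-in for pglist_data   */
    struct item { struct node *node; };     /* stand-in for struct page   */

    static void lock_node(struct node *n)   { printf("lock node %d\n", n->id); }
    static void unlock_node(struct node *n) { printf("unlock node %d\n", n->id); }

    /* Apply move_fn() to every item in the batch, taking each node's lock
       at most once per run of consecutive items that live on that node. */
    static void batch_move(struct item *items, int nr,
                           void (*move_fn)(struct item *))
    {
        struct node *locked = NULL;
        int i;

        for (i = 0; i < nr; i++) {
            struct node *node = items[i].node;

            if (node != locked) {
                if (locked)
                    unlock_node(locked);
                lock_node(node);
                locked = node;
            }
            move_fn(&items[i]);
        }
        if (locked)
            unlock_node(locked);
    }

    static void report(struct item *it)
    {
        printf("  move item on node %d\n", it->node->id);
    }

    int main(void)
    {
        struct node n0 = { 0 }, n1 = { 1 };
        struct item items[4] = { { &n0 }, { &n0 }, { &n1 }, { &n1 } };

        batch_move(items, 4, report);   /* lock taken twice, not four times */
        return 0;
    }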
218 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, in pagevec_move_tail_fn() argument
223 if (PageLRU(page) && !PageUnevictable(page)) { in pagevec_move_tail_fn()
224 del_page_from_lru_list(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
225 ClearPageActive(page); in pagevec_move_tail_fn()
226 add_page_to_lru_list_tail(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
248 void rotate_reclaimable_page(struct page *page) in rotate_reclaimable_page() argument
250 if (!PageLocked(page) && !PageDirty(page) && in rotate_reclaimable_page()
251 !PageUnevictable(page) && PageLRU(page)) { in rotate_reclaimable_page()
255 get_page(page); in rotate_reclaimable_page()
258 if (!pagevec_add(pvec, page) || PageCompound(page)) in rotate_reclaimable_page()
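
rotate_reclaimable_page(), like activate_page(), __lru_cache_add(), deactivate_file_page() and mark_page_lazyfree() further down, follows one pattern: take a page reference, append the page to a per-CPU pagevec, and drain the whole batch once the pagevec fills up (or the page is compound). A rough standalone model of that append-until-full-then-flush idea, with invented names (struct batch, queue_one, ...), might look like this:

    #include <stdbool.h>
    #include <stdio.h>

    #define BATCH_SIZE 15   /* the kernel's PAGEVEC_SIZE is similarly small */

    struct batch {
        int nr;
        int slots[BATCH_SIZE];
    };

    /* Mirrors pagevec_add(): add the entry and report the space left. */
    static bool batch_add(struct batch *b, int val)
    {
        b->slots[b->nr++] = val;
        return b->nr < BATCH_SIZE;
    }

    static void batch_drain(struct batch *b)
    {
        printf("draining %d queued entries under one lock\n", b->nr);
        b->nr = 0;
    }

    /* Kernel analogue: get_page(); if (!pagevec_add(...)) drain the pvec. */
    static void queue_one(struct batch *b, int val)
    {
        if (!batch_add(b, val))
            batch_drain(b);
    }

    int main(void)
    {
        struct batch b = { 0 };
        int i;

        for (i = 0; i < 40; i++)
            queue_one(&b, i);
        if (b.nr)
            batch_drain(&b);    /* flush whatever is left in the tail */
        return 0;
    }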
274 static void __activate_page(struct page *page, struct lruvec *lruvec, in __activate_page() argument
277 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in __activate_page()
278 int file = page_is_file_cache(page); in __activate_page()
279 int lru = page_lru_base_type(page); in __activate_page()
281 del_page_from_lru_list(page, lruvec, lru); in __activate_page()
282 SetPageActive(page); in __activate_page()
284 add_page_to_lru_list(page, lruvec, lru); in __activate_page()
285 trace_mm_lru_activate(page); in __activate_page()
306 void activate_page(struct page *page) in activate_page() argument
308 page = compound_head(page); in activate_page()
309 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in activate_page()
312 get_page(page); in activate_page()
313 if (!pagevec_add(pvec, page) || PageCompound(page)) in activate_page()
329 void activate_page(struct page *page) in activate_page() argument
331 struct zone *zone = page_zone(page); in activate_page()
333 page = compound_head(page); in activate_page()
335 __activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL); in activate_page()
340 static void __lru_cache_activate_page(struct page *page) in __lru_cache_activate_page() argument
356 struct page *pagevec_page = pvec->pages[i]; in __lru_cache_activate_page()
358 if (pagevec_page == page) { in __lru_cache_activate_page()
359 SetPageActive(page); in __lru_cache_activate_page()
377 void mark_page_accessed(struct page *page) in mark_page_accessed() argument
379 page = compound_head(page); in mark_page_accessed()
380 if (!PageActive(page) && !PageUnevictable(page) && in mark_page_accessed()
381 PageReferenced(page)) { in mark_page_accessed()
389 if (PageLRU(page)) in mark_page_accessed()
390 activate_page(page); in mark_page_accessed()
392 __lru_cache_activate_page(page); in mark_page_accessed()
393 ClearPageReferenced(page); in mark_page_accessed()
394 if (page_is_file_cache(page)) in mark_page_accessed()
395 workingset_activation(page); in mark_page_accessed()
396 } else if (!PageReferenced(page)) { in mark_page_accessed()
397 SetPageReferenced(page); in mark_page_accessed()
399 if (page_is_idle(page)) in mark_page_accessed()
400 clear_page_idle(page); in mark_page_accessed()
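
mark_page_accessed() is the classic two-touch promotion: the first access only sets PG_referenced, and a second access on a still-inactive, already-referenced page activates it and clears PG_referenced again. A minimal standalone state-machine sketch of that policy follows; struct vpage and touch() are invented names, and the real function also handles idle-page tracking and workingset accounting, which the sketch omits.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative page state: a subset of the flags the kernel consults. */
    struct vpage {
        bool active;
        bool referenced;
        bool unevictable;
    };

    /* Model of mark_page_accessed(): first touch marks the page referenced,
       a second touch on an inactive referenced page promotes it to active. */
    static void touch(struct vpage *p)
    {
        if (!p->active && !p->unevictable && p->referenced) {
            p->active = true;       /* kernel: activate_page()       */
            p->referenced = false;  /* kernel: ClearPageReferenced() */
        } else if (!p->referenced) {
            p->referenced = true;   /* kernel: SetPageReferenced()   */
        }
    }

    int main(void)
    {
        struct vpage p = { false, false, false };

        touch(&p);
        printf("after 1st touch: active=%d referenced=%d\n", p.active, p.referenced);
        touch(&p);
        printf("after 2nd touch: active=%d referenced=%d\n", p.active, p.referenced);
        return 0;
    }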
404 static void __lru_cache_add(struct page *page) in __lru_cache_add() argument
408 get_page(page); in __lru_cache_add()
409 if (!pagevec_add(pvec, page) || PageCompound(page)) in __lru_cache_add()
418 void lru_cache_add_anon(struct page *page) in lru_cache_add_anon() argument
420 if (PageActive(page)) in lru_cache_add_anon()
421 ClearPageActive(page); in lru_cache_add_anon()
422 __lru_cache_add(page); in lru_cache_add_anon()
425 void lru_cache_add_file(struct page *page) in lru_cache_add_file() argument
427 if (PageActive(page)) in lru_cache_add_file()
428 ClearPageActive(page); in lru_cache_add_file()
429 __lru_cache_add(page); in lru_cache_add_file()
442 void lru_cache_add(struct page *page) in lru_cache_add() argument
444 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); in lru_cache_add()
445 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add()
446 __lru_cache_add(page); in lru_cache_add()
459 void lru_cache_add_active_or_unevictable(struct page *page, in lru_cache_add_active_or_unevictable() argument
462 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add_active_or_unevictable()
465 SetPageActive(page); in lru_cache_add_active_or_unevictable()
466 else if (!TestSetPageMlocked(page)) { in lru_cache_add_active_or_unevictable()
472 __mod_zone_page_state(page_zone(page), NR_MLOCK, in lru_cache_add_active_or_unevictable()
473 hpage_nr_pages(page)); in lru_cache_add_active_or_unevictable()
476 lru_cache_add(page); in lru_cache_add_active_or_unevictable()
500 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, in lru_deactivate_file_fn() argument
506 if (!PageLRU(page)) in lru_deactivate_file_fn()
509 if (PageUnevictable(page)) in lru_deactivate_file_fn()
513 if (page_mapped(page)) in lru_deactivate_file_fn()
516 active = PageActive(page); in lru_deactivate_file_fn()
517 file = page_is_file_cache(page); in lru_deactivate_file_fn()
518 lru = page_lru_base_type(page); in lru_deactivate_file_fn()
520 del_page_from_lru_list(page, lruvec, lru + active); in lru_deactivate_file_fn()
521 ClearPageActive(page); in lru_deactivate_file_fn()
522 ClearPageReferenced(page); in lru_deactivate_file_fn()
523 add_page_to_lru_list(page, lruvec, lru); in lru_deactivate_file_fn()
525 if (PageWriteback(page) || PageDirty(page)) { in lru_deactivate_file_fn()
531 SetPageReclaim(page); in lru_deactivate_file_fn()
537 list_move_tail(&page->lru, &lruvec->lists[lru]); in lru_deactivate_file_fn()
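
lru_deactivate_file_fn() skips pages that are off the LRU, unevictable or still mapped; otherwise it clears the active and referenced bits and moves the page to the inactive list, with clean pages rotated to the tail (cheap to reclaim next) and dirty or writeback pages left with PG_reclaim set so they are rotated once writeback completes. A condensed standalone sketch of that decision, with invented names (struct fpage, deactivate_file, ...), is:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flags only; the field names are made up for this sketch. */
    struct fpage {
        bool on_lru, unevictable, mapped;
        bool active, referenced;
        bool dirty_or_writeback, reclaim;
        const char *where;
    };

    /* Model of the placement policy in lru_deactivate_file_fn(). */
    static void deactivate_file(struct fpage *p)
    {
        if (!p->on_lru || p->unevictable || p->mapped)
            return;

        p->active = false;
        p->referenced = false;

        if (p->dirty_or_writeback) {
            /* rotate to the tail later, when writeback finishes */
            p->reclaim = true;
            p->where = "inactive head (PG_reclaim set)";
        } else {
            p->where = "inactive tail";
        }
    }

    int main(void)
    {
        struct fpage clean = { .on_lru = true, .active = true, .where = "active" };
        struct fpage dirty = { .on_lru = true, .active = true,
                               .dirty_or_writeback = true, .where = "active" };

        deactivate_file(&clean);
        deactivate_file(&dirty);
        printf("clean page -> %s\n", clean.where);
        printf("dirty page -> %s\n", dirty.where);
        return 0;
    }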
547 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, in lru_lazyfree_fn() argument
550 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in lru_lazyfree_fn()
551 !PageSwapCache(page) && !PageUnevictable(page)) { in lru_lazyfree_fn()
552 bool active = PageActive(page); in lru_lazyfree_fn()
554 del_page_from_lru_list(page, lruvec, in lru_lazyfree_fn()
556 ClearPageActive(page); in lru_lazyfree_fn()
557 ClearPageReferenced(page); in lru_lazyfree_fn()
563 ClearPageSwapBacked(page); in lru_lazyfree_fn()
564 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_fn()
566 __count_vm_events(PGLAZYFREE, hpage_nr_pages(page)); in lru_lazyfree_fn()
567 count_memcg_page_event(page, PGLAZYFREE); in lru_lazyfree_fn()
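
lru_lazyfree_fn() implements the MADV_FREE transition: a clean anonymous page that is on the LRU, swap-backed, not in the swap cache and not unevictable loses its active, referenced and swap-backed bits and is moved to the inactive file list, so reclaim can drop it without touching swap. A small standalone sketch of that flag transition, using invented names (struct apage, lazyfree), is:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flag set; the names are made up for this sketch. */
    struct apage {
        bool on_lru, anon, swap_backed, in_swapcache, unevictable;
        bool active, referenced;
        const char *list;
    };

    /* Model of the MADV_FREE transition performed by lru_lazyfree_fn(). */
    static void lazyfree(struct apage *p)
    {
        if (!(p->on_lru && p->anon && p->swap_backed &&
              !p->in_swapcache && !p->unevictable))
            return;

        p->active = false;
        p->referenced = false;
        p->swap_backed = false;     /* marks it droppable rather than swappable */
        p->list = "LRU_INACTIVE_FILE";
    }

    int main(void)
    {
        struct apage p = {
            .on_lru = true, .anon = true, .swap_backed = true,
            .active = true, .referenced = true,
            .list = "LRU_ACTIVE_ANON",
        };

        lazyfree(&p);
        printf("page now on %s, swap_backed=%d\n", p.list, p.swap_backed);
        return 0;
    }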
613 void deactivate_file_page(struct page *page) in deactivate_file_page() argument
619 if (PageUnevictable(page)) in deactivate_file_page()
622 if (likely(get_page_unless_zero(page))) { in deactivate_file_page()
625 if (!pagevec_add(pvec, page) || PageCompound(page)) in deactivate_file_page()
638 void mark_page_lazyfree(struct page *page) in mark_page_lazyfree() argument
640 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in mark_page_lazyfree()
641 !PageSwapCache(page) && !PageUnevictable(page)) { in mark_page_lazyfree()
644 get_page(page); in mark_page_lazyfree()
645 if (!pagevec_add(pvec, page) || PageCompound(page)) in mark_page_lazyfree()
715 void release_pages(struct page **pages, int nr) in release_pages()
725 struct page *page = pages[i]; in release_pages() local
737 if (is_huge_zero_page(page)) in release_pages()
741 if (is_device_public_page(page)) { in release_pages()
747 put_devmap_managed_page(page); in release_pages()
751 page = compound_head(page); in release_pages()
752 if (!put_page_testzero(page)) in release_pages()
755 if (PageCompound(page)) { in release_pages()
760 __put_compound_page(page); in release_pages()
764 if (PageLRU(page)) { in release_pages()
765 struct pglist_data *pgdat = page_pgdat(page); in release_pages()
776 lruvec = mem_cgroup_page_lruvec(page, locked_pgdat); in release_pages()
777 VM_BUG_ON_PAGE(!PageLRU(page), page); in release_pages()
778 __ClearPageLRU(page); in release_pages()
779 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in release_pages()
783 __ClearPageActive(page); in release_pages()
784 __ClearPageWaiters(page); in release_pages()
786 list_add(&page->lru, &pages_to_free); in release_pages()
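
release_pages() walks the array, skips the huge zero page and devmap-managed pages, drops one reference from each remaining page, and for pages whose count hits zero pulls them off the LRU (taking each node's lru_lock only as needed) and gathers them on a local pages_to_free list that is freed in bulk afterwards. The standalone sketch below models just the gather-then-free-in-bulk part; struct rpage and release_all are invented names, not kernel API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in page with just a reference count and an id for printing. */
    struct rpage {
        int refcount;
        int id;
    };

    /* Drop one reference from every page in the array and collect the
       ones that hit zero so they can all be freed after the loop, the
       way release_pages() builds pages_to_free and frees it in one go. */
    static void release_all(struct rpage **pages, int nr)
    {
        struct rpage *to_free[16];  /* small scratch array, enough here */
        int n_free = 0, i;

        for (i = 0; i < nr; i++) {
            struct rpage *p = pages[i];

            if (--p->refcount != 0)
                continue;           /* someone else still holds it */
            to_free[n_free++] = p;
        }

        for (i = 0; i < n_free; i++) {
            printf("freeing page %d\n", to_free[i]->id);
            free(to_free[i]);
        }
    }

    int main(void)
    {
        struct rpage *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
        struct rpage *batch[2] = { a, b };

        *a = (struct rpage){ .refcount = 1, .id = 0 };  /* will be freed     */
        *b = (struct rpage){ .refcount = 2, .id = 1 };  /* still referenced  */

        release_all(batch, 2);
        free(b);    /* drop our remaining reference outside the model */
        return 0;
    }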
819 void lru_add_page_tail(struct page *page, struct page *page_tail, in lru_add_page_tail() argument
824 VM_BUG_ON_PAGE(!PageHead(page), page); in lru_add_page_tail()
825 VM_BUG_ON_PAGE(PageCompound(page_tail), page); in lru_add_page_tail()
826 VM_BUG_ON_PAGE(PageLRU(page_tail), page); in lru_add_page_tail()
833 if (likely(PageLRU(page))) in lru_add_page_tail()
834 list_add_tail(&page_tail->lru, &page->lru); in lru_add_page_tail()
853 if (!PageUnevictable(page)) in lru_add_page_tail()
858 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, in __pagevec_lru_add_fn() argument
862 int was_unevictable = TestClearPageUnevictable(page); in __pagevec_lru_add_fn()
864 VM_BUG_ON_PAGE(PageLRU(page), page); in __pagevec_lru_add_fn()
866 SetPageLRU(page); in __pagevec_lru_add_fn()
895 if (page_evictable(page)) { in __pagevec_lru_add_fn()
896 lru = page_lru(page); in __pagevec_lru_add_fn()
897 update_page_reclaim_stat(lruvec, page_is_file_cache(page), in __pagevec_lru_add_fn()
898 PageActive(page)); in __pagevec_lru_add_fn()
903 ClearPageActive(page); in __pagevec_lru_add_fn()
904 SetPageUnevictable(page); in __pagevec_lru_add_fn()
909 add_page_to_lru_list(page, lruvec, lru); in __pagevec_lru_add_fn()
910 trace_mm_lru_insertion(page, lru); in __pagevec_lru_add_fn()
967 struct page *page = pvec->pages[i]; in pagevec_remove_exceptionals() local
968 if (!radix_tree_exceptional_entry(page)) in pagevec_remove_exceptionals()
969 pvec->pages[j++] = page; in pagevec_remove_exceptionals()
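
pagevec_remove_exceptionals() compacts the pagevec in place, keeping only real page pointers and dropping radix-tree exceptional entries (shadow and swap entries). The same two-index in-place filter, as a tiny standalone sketch with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    #define N 6

    /* Stand-in predicate: odd values play the role of exceptional entries. */
    static bool is_exceptional(int v) { return v & 1; }

    /* Keep only ordinary entries, compacting the array in place and
       returning the new count (the kernel updates pvec->nr the same way). */
    static int remove_exceptionals(int *slots, int nr)
    {
        int i, j = 0;

        for (i = 0; i < nr; i++) {
            if (!is_exceptional(slots[i]))
                slots[j++] = slots[i];
        }
        return j;
    }

    int main(void)
    {
        int slots[N] = { 2, 3, 4, 5, 6, 7 };
        int nr = remove_exceptionals(slots, N), i;

        for (i = 0; i < nr; i++)
            printf("%d ", slots[i]);
        printf("\n");
        return 0;
    }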