Lines matching refs:folio in mm/vmscan.c

179 			struct folio *prev;				\
1050 static inline int is_page_cache_freeable(struct folio *folio) in is_page_cache_freeable() argument
1057 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
1058 1 + folio_nr_pages(folio); in is_page_cache_freeable()
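
The test at lines 1057-1058 encodes a refcount invariant: a folio is freeable only when the remaining references are the caller's single reference plus one per page held by the page cache, discounting the one reference pinned by private (buffer) data. A minimal userspace model of that arithmetic follows; struct folio_model and its fields are illustrative stand-ins, not the kernel layout.

#include <stdbool.h>
#include <stdio.h>

struct folio_model {
        int ref_count;          /* folio_ref_count() */
        int nr_pages;           /* folio_nr_pages() */
        bool has_private;       /* folio_test_private() */
};

/* Mirrors lines 1057-1058: freeable iff only the caller (1) and the
 * page cache (nr_pages) still hold references, after discounting the
 * reference pinned by private buffer data. */
static bool model_is_freeable(const struct folio_model *f)
{
        return f->ref_count - (f->has_private ? 1 : 0) == 1 + f->nr_pages;
}

int main(void)
{
        struct folio_model f = { .ref_count = 5, .nr_pages = 4 };
        printf("%d\n", model_is_freeable(&f));  /* 1: freeable */
        f.ref_count++;                          /* e.g. a transient pin */
        printf("%d\n", model_is_freeable(&f));  /* 0: keep it */
        return 0;
}
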
1074 struct folio *folio, int error) in handle_write_error() argument
1076 folio_lock(folio); in handle_write_error()
1077 if (folio_mapping(folio) == mapping) in handle_write_error()
1079 folio_unlock(folio); in handle_write_error()
1189 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, in __acct_reclaim_writeback() argument
1194 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN); in __acct_reclaim_writeback()
1226 static pageout_t pageout(struct folio *folio, struct address_space *mapping, in pageout() argument
1245 if (!is_page_cache_freeable(folio)) in pageout()
1252 if (folio_test_private(folio)) { in pageout()
1253 if (try_to_free_buffers(folio)) { in pageout()
1254 folio_clear_dirty(folio); in pageout()
1264 if (folio_clear_dirty_for_io(folio)) { in pageout()
1275 folio_set_reclaim(folio); in pageout()
1276 res = mapping->a_ops->writepage(&folio->page, &wbc); in pageout()
1278 handle_write_error(mapping, folio, res); in pageout()
1280 folio_clear_reclaim(folio); in pageout()
1284 if (!folio_test_writeback(folio)) { in pageout()
1286 folio_clear_reclaim(folio); in pageout()
1288 trace_mm_vmscan_write_folio(folio); in pageout()
1289 node_stat_add_folio(folio, NR_VMSCAN_WRITE); in pageout()
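
pageout() (lines 1226-1289) turns a dirty but otherwise freeable folio into writeback I/O. Below is a hedged model of its return ladder: the enum mirrors the kernel's pageout_t values (PAGE_KEEP, PAGE_ACTIVATE, PAGE_SUCCESS, PAGE_CLEAN), the input struct is an invented stand-in, and the no-mapping/buffer-head branches are elided.

#include <stdbool.h>

#define AOP_WRITEPAGE_ACTIVATE 0x80000  /* as defined in the kernel's fs.h */

enum pageout_res { POUT_KEEP, POUT_ACTIVATE, POUT_SUCCESS, POUT_CLEAN };

struct pageout_in {
        bool freeable;        /* is_page_cache_freeable(), line 1245 */
        bool cleared_dirty;   /* folio_clear_dirty_for_io(), line 1264 */
        int  write_res;       /* ->writepage() result, line 1276 */
};

static enum pageout_res pageout_model(const struct pageout_in *in)
{
        if (!in->freeable)
                return POUT_KEEP;       /* extra references: hands off */
        if (!in->cleared_dirty)
                return POUT_CLEAN;      /* raced with someone cleaning it */
        /* folio_set_reclaim() precedes the write, line 1275 */
        if (in->write_res == AOP_WRITEPAGE_ACTIVATE)
                return POUT_ACTIVATE;   /* fs refused: clear reclaim, keep hot */
        /* a negative write_res is recorded via handle_write_error()
         * (line 1278), but the attempt still counts as submitted */
        return POUT_SUCCESS;
}
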
1300 static int __remove_mapping(struct address_space *mapping, struct folio *folio, in __remove_mapping() argument
1306 BUG_ON(!folio_test_locked(folio)); in __remove_mapping()
1307 BUG_ON(mapping != folio_mapping(folio)); in __remove_mapping()
1309 if (!folio_test_swapcache(folio)) in __remove_mapping()
1337 refcount = 1 + folio_nr_pages(folio); in __remove_mapping()
1338 if (!folio_ref_freeze(folio, refcount)) in __remove_mapping()
1341 if (unlikely(folio_test_dirty(folio))) { in __remove_mapping()
1342 folio_ref_unfreeze(folio, refcount); in __remove_mapping()
1346 if (folio_test_swapcache(folio)) { in __remove_mapping()
1347 swp_entry_t swap = folio_swap_entry(folio); in __remove_mapping()
1351 shadow = workingset_eviction(folio, target_memcg); in __remove_mapping()
1352 mem_cgroup_swapout(folio, swap); in __remove_mapping()
1353 __delete_from_swap_cache(folio, swap, shadow); in __remove_mapping()
1355 put_swap_folio(folio, swap); in __remove_mapping()
1357 void (*free_folio)(struct folio *); in __remove_mapping()
1376 if (reclaimed && folio_is_file_lru(folio) && in __remove_mapping()
1378 shadow = workingset_eviction(folio, target_memcg); in __remove_mapping()
1379 __filemap_remove_folio(folio, shadow); in __remove_mapping()
1386 free_folio(folio); in __remove_mapping()
1393 if (!folio_test_swapcache(folio)) in __remove_mapping()
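
The core trick in __remove_mapping() (lines 1337-1342) is refcount freezing: atomically swap the expected reference count for zero, so a concurrent speculative lookup that bumps the count forces the removal to back off. A runnable userspace sketch of the pattern with C11 atomics; names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Models folio_ref_freeze(): succeeds only if the count still equals
 * the expected 1 (caller) + folio_nr_pages() (page cache) references. */
static bool ref_freeze(atomic_int *ref, int expected)
{
        return atomic_compare_exchange_strong(ref, &expected, 0);
}

/* Models folio_ref_unfreeze(): backs out when the folio turned dirty
 * under us (line 1342), or hands the caller one reference back in
 * remove_mapping() (line 1418). */
static void ref_unfreeze(atomic_int *ref, int count)
{
        atomic_store(ref, count);
}

int main(void)
{
        atomic_int ref = 5;                    /* 1 + folio_nr_pages(=4) */
        printf("%d\n", ref_freeze(&ref, 5));   /* 1: frozen, safe to delete */
        ref_unfreeze(&ref, 1);                 /* caller keeps a single ref */
        printf("%d\n", ref_freeze(&ref, 5));   /* 0: count no longer matches */
        return 0;
}
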
1410 long remove_mapping(struct address_space *mapping, struct folio *folio) in remove_mapping() argument
1412 if (__remove_mapping(mapping, folio, false, NULL)) { in remove_mapping()
1418 folio_ref_unfreeze(folio, 1); in remove_mapping()
1419 return folio_nr_pages(folio); in remove_mapping()
1433 void folio_putback_lru(struct folio *folio) in folio_putback_lru() argument
1435 folio_add_lru(folio); in folio_putback_lru()
1436 folio_put(folio); /* drop ref from isolate */ in folio_putback_lru()
1446 static enum folio_references folio_check_references(struct folio *folio, in folio_check_references() argument
1452 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
1454 referenced_folio = folio_test_clear_referenced(folio); in folio_check_references()
1482 folio_set_referenced(folio); in folio_check_references()
1490 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) in folio_check_references()
1497 if (referenced_folio && folio_is_file_lru(folio)) in folio_check_references()
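
folio_check_references() (lines 1446-1497) distills the rmap walk into one of four verdicts. The ladder below reproduces the branches visible above; the enum mirrors the kernel's folio_references values, and the inputs are stand-ins for the results of folio_referenced() and folio_test_clear_referenced().

#include <stdbool.h>

enum folio_refs {
        FREF_RECLAIM,           /* no recent use: evict */
        FREF_RECLAIM_CLEAN,     /* evict, but skip the writeback cost */
        FREF_KEEP,              /* give it one more LRU round */
        FREF_ACTIVATE,          /* promote back to the active list */
};

static enum folio_refs check_refs_model(int referenced_ptes,
                                        bool referenced_folio,
                                        bool file, bool vm_exec)
{
        if (referenced_ptes) {
                /* folio_set_referenced() happens here (line 1482) */
                if (referenced_folio || referenced_ptes > 1)
                        return FREF_ACTIVATE;   /* used more than once */
                if (vm_exec && file)
                        return FREF_ACTIVATE;   /* protect executable text,
                                                   line 1490 */
                return FREF_KEEP;
        }
        if (referenced_folio && file)
                return FREF_RECLAIM_CLEAN;      /* line 1497 */
        return FREF_RECLAIM;
}
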
1504 static void folio_check_dirty_writeback(struct folio *folio, in folio_check_dirty_writeback() argument
1516 if (!folio_is_file_lru(folio) || in folio_check_dirty_writeback()
1517 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) { in folio_check_dirty_writeback()
1524 *dirty = folio_test_dirty(folio); in folio_check_dirty_writeback()
1525 *writeback = folio_test_writeback(folio); in folio_check_dirty_writeback()
1528 if (!folio_test_private(folio)) in folio_check_dirty_writeback()
1531 mapping = folio_mapping(folio); in folio_check_dirty_writeback()
1533 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
1610 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) in may_enter_fs() argument
1614 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO)) in may_enter_fs()
1623 return !data_race(folio_swap_flags(folio) & SWP_FS_OPS); in may_enter_fs()
1648 struct folio *folio; in shrink_folio_list() local
1655 folio = lru_to_folio(folio_list); in shrink_folio_list()
1656 list_del(&folio->lru); in shrink_folio_list()
1658 if (!folio_trylock(folio)) in shrink_folio_list()
1661 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); in shrink_folio_list()
1663 nr_pages = folio_nr_pages(folio); in shrink_folio_list()
1668 if (unlikely(!folio_evictable(folio))) in shrink_folio_list()
1671 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1676 folio_mapped(folio) && folio_test_referenced(folio)) in shrink_folio_list()
1684 folio_check_dirty_writeback(folio, &dirty, &writeback); in shrink_folio_list()
1697 if (writeback && folio_test_reclaim(folio)) in shrink_folio_list()
1744 if (folio_test_writeback(folio)) { in shrink_folio_list()
1747 folio_test_reclaim(folio) && in shrink_folio_list()
1754 !folio_test_reclaim(folio) || in shrink_folio_list()
1755 !may_enter_fs(folio, sc->gfp_mask)) { in shrink_folio_list()
1770 folio_set_reclaim(folio); in shrink_folio_list()
1776 folio_unlock(folio); in shrink_folio_list()
1777 folio_wait_writeback(folio); in shrink_folio_list()
1779 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1785 references = folio_check_references(folio, sc); in shrink_folio_list()
1803 (thp_migration_supported() || !folio_test_large(folio))) { in shrink_folio_list()
1804 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1805 folio_unlock(folio); in shrink_folio_list()
1814 if (folio_test_anon(folio) && folio_test_swapbacked(folio)) { in shrink_folio_list()
1815 if (!folio_test_swapcache(folio)) { in shrink_folio_list()
1818 if (folio_maybe_dma_pinned(folio)) in shrink_folio_list()
1820 if (folio_test_large(folio)) { in shrink_folio_list()
1822 if (!can_split_folio(folio, NULL)) in shrink_folio_list()
1829 if (!folio_entire_mapcount(folio) && in shrink_folio_list()
1830 split_folio_to_list(folio, in shrink_folio_list()
1834 if (!add_to_swap(folio)) { in shrink_folio_list()
1835 if (!folio_test_large(folio)) in shrink_folio_list()
1838 if (split_folio_to_list(folio, in shrink_folio_list()
1844 if (!add_to_swap(folio)) in shrink_folio_list()
1848 } else if (folio_test_swapbacked(folio) && in shrink_folio_list()
1849 folio_test_large(folio)) { in shrink_folio_list()
1851 if (split_folio_to_list(folio, folio_list)) in shrink_folio_list()
1860 if ((nr_pages > 1) && !folio_test_large(folio)) { in shrink_folio_list()
1869 if (folio_mapped(folio)) { in shrink_folio_list()
1871 bool was_swapbacked = folio_test_swapbacked(folio); in shrink_folio_list()
1873 if (folio_test_pmd_mappable(folio)) in shrink_folio_list()
1876 try_to_unmap(folio, flags); in shrink_folio_list()
1877 if (folio_mapped(folio)) { in shrink_folio_list()
1880 folio_test_swapbacked(folio)) in shrink_folio_list()
1886 mapping = folio_mapping(folio); in shrink_folio_list()
1887 if (folio_test_dirty(folio)) { in shrink_folio_list()
1899 if (folio_is_file_lru(folio) && in shrink_folio_list()
1901 !folio_test_reclaim(folio) || in shrink_folio_list()
1909 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE, in shrink_folio_list()
1911 folio_set_reclaim(folio); in shrink_folio_list()
1918 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
1929 switch (pageout(folio, mapping, &plug)) { in shrink_folio_list()
1937 if (folio_test_writeback(folio)) in shrink_folio_list()
1939 if (folio_test_dirty(folio)) in shrink_folio_list()
1946 if (!folio_trylock(folio)) in shrink_folio_list()
1948 if (folio_test_dirty(folio) || in shrink_folio_list()
1949 folio_test_writeback(folio)) in shrink_folio_list()
1951 mapping = folio_mapping(folio); in shrink_folio_list()
1981 if (folio_has_private(folio)) { in shrink_folio_list()
1982 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
1984 if (!mapping && folio_ref_count(folio) == 1) { in shrink_folio_list()
1985 folio_unlock(folio); in shrink_folio_list()
1986 if (folio_put_testzero(folio)) in shrink_folio_list()
2002 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { in shrink_folio_list()
2004 if (!folio_ref_freeze(folio, 1)) in shrink_folio_list()
2015 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages); in shrink_folio_list()
2016 } else if (!mapping || !__remove_mapping(mapping, folio, true, in shrink_folio_list()
2020 folio_unlock(folio); in shrink_folio_list()
2032 if (unlikely(folio_test_large(folio))) in shrink_folio_list()
2033 destroy_large_folio(folio); in shrink_folio_list()
2035 list_add(&folio->lru, &free_folios); in shrink_folio_list()
2049 if (folio_test_swapcache(folio) && in shrink_folio_list()
2050 (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio))) in shrink_folio_list()
2051 folio_free_swap(folio); in shrink_folio_list()
2052 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); in shrink_folio_list()
2053 if (!folio_test_mlocked(folio)) { in shrink_folio_list()
2054 int type = folio_is_file_lru(folio); in shrink_folio_list()
2055 folio_set_active(folio); in shrink_folio_list()
2057 count_memcg_folio_events(folio, PGACTIVATE, nr_pages); in shrink_folio_list()
2060 folio_unlock(folio); in shrink_folio_list()
2062 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
2063 VM_BUG_ON_FOLIO(folio_test_lru(folio) || in shrink_folio_list()
2064 folio_test_unevictable(folio), folio); in shrink_folio_list()
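
shrink_folio_list() (lines 1648-2064) is the heart of reclaim, and the listing above shows only its folio touchpoints. The outline below condenses the per-folio pass in source order as a reading aid; it is not compilable outside the kernel, and statistics, goto labels, and error paths are elided.

while (!list_empty(folio_list)) {
        folio = lru_to_folio(folio_list);
        list_del(&folio->lru);

        if (!folio_trylock(folio))              /* line 1658 */
                continue;                       /* contended: revisit later */
        if (!folio_evictable(folio))            /* line 1668 */
                continue;                       /* put back, stays unevictable */
        if (folio_test_writeback(folio))        /* lines 1744-1779 */
                ;                               /* throttle, rotate, or wait */

        switch (folio_check_references(folio, sc)) {    /* line 1785 */
        /* FOLIOREF_ACTIVATE -> activate, FOLIOREF_KEEP -> keep,
         * otherwise fall through and try to reclaim */
        }

        /* demote to a lower memory tier when possible, lines 1803-1805;
         * anon: allocate swap, splitting large folios as needed,
         * lines 1814-1851 */
        if (folio_mapped(folio))
                try_to_unmap(folio, flags);             /* line 1876 */
        if (folio_test_dirty(folio))
                pageout(folio, mapping, &plug);         /* line 1929 */
        if (folio_has_private(folio))
                filemap_release_folio(folio, sc->gfp_mask); /* line 1982 */

        /* lazyfree anon (line 2002) or __remove_mapping() (line 2016);
         * on success the folio lands on free_folios (line 2035) */
}
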
2101 struct folio *folio, *next; in reclaim_clean_pages_from_list() local
2105 list_for_each_entry_safe(folio, next, folio_list, lru) { in reclaim_clean_pages_from_list()
2106 if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) && in reclaim_clean_pages_from_list()
2107 !folio_test_dirty(folio) && !__folio_test_movable(folio) && in reclaim_clean_pages_from_list()
2108 !folio_test_unevictable(folio)) { in reclaim_clean_pages_from_list()
2109 folio_clear_active(folio); in reclaim_clean_pages_from_list()
2110 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
2197 struct folio *folio; in isolate_lru_folios() local
2199 folio = lru_to_folio(src); in isolate_lru_folios()
2200 prefetchw_prev_lru_folio(folio, src, flags); in isolate_lru_folios()
2202 nr_pages = folio_nr_pages(folio); in isolate_lru_folios()
2205 if (folio_zonenum(folio) > sc->reclaim_idx) { in isolate_lru_folios()
2206 nr_skipped[folio_zonenum(folio)] += nr_pages; in isolate_lru_folios()
2220 if (!folio_test_lru(folio)) in isolate_lru_folios()
2222 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
2230 if (unlikely(!folio_try_get(folio))) in isolate_lru_folios()
2233 if (!folio_test_clear_lru(folio)) { in isolate_lru_folios()
2235 folio_put(folio); in isolate_lru_folios()
2240 nr_zone_taken[folio_zonenum(folio)] += nr_pages; in isolate_lru_folios()
2243 list_move(&folio->lru, move_to); in isolate_lru_folios()
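
Lines 2230-2235 show the lock-free, two-step isolation handshake: take a reference first, then try to claim the LRU flag; failing either step means another CPU got there first. A runnable userspace model of the same handshake; the struct and names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>

struct folio_model {
        atomic_int  ref;        /* reference count */
        atomic_bool lru;        /* PG_lru: "on an LRU list" */
};

/* Models folio_try_get(): never resurrect a folio whose count hit 0. */
static bool try_get(struct folio_model *f)
{
        int old = atomic_load(&f->ref);

        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak(&f->ref, &old, old + 1));
        return true;
}

/* Models lines 2230-2235: reference first, then test-and-clear PG_lru;
 * if the flag was already clear, someone else isolated the folio. */
static bool isolate(struct folio_model *f)
{
        if (!try_get(f))
                return false;
        if (!atomic_exchange(&f->lru, false)) {
                atomic_fetch_sub(&f->ref, 1);   /* folio_put(), line 2235 */
                return false;
        }
        return true;    /* caller now owns the folio exclusively */
}
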
2296 int folio_isolate_lru(struct folio *folio) in folio_isolate_lru() argument
2300 VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio); in folio_isolate_lru()
2302 if (folio_test_clear_lru(folio)) { in folio_isolate_lru()
2305 folio_get(folio); in folio_isolate_lru()
2306 lruvec = folio_lruvec_lock_irq(folio); in folio_isolate_lru()
2307 lruvec_del_folio(lruvec, folio); in folio_isolate_lru()
2372 struct folio *folio = lru_to_folio(list); in move_folios_to_lru() local
2374 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in move_folios_to_lru()
2375 list_del(&folio->lru); in move_folios_to_lru()
2376 if (unlikely(!folio_evictable(folio))) { in move_folios_to_lru()
2378 folio_putback_lru(folio); in move_folios_to_lru()
2394 folio_set_lru(folio); in move_folios_to_lru()
2396 if (unlikely(folio_put_testzero(folio))) { in move_folios_to_lru()
2397 __folio_clear_lru_flags(folio); in move_folios_to_lru()
2399 if (unlikely(folio_test_large(folio))) { in move_folios_to_lru()
2401 destroy_large_folio(folio); in move_folios_to_lru()
2404 list_add(&folio->lru, &folios_to_free); in move_folios_to_lru()
2413 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); in move_folios_to_lru()
2414 lruvec_add_folio(lruvec, folio); in move_folios_to_lru()
2415 nr_pages = folio_nr_pages(folio); in move_folios_to_lru()
2417 if (folio_test_active(folio)) in move_folios_to_lru()
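
move_folios_to_lru() reverses that handshake: set PG_lru again, then drop the isolation reference; only when that was the last reference (folio_put_testzero(), line 2396) is the folio actually torn down. Continuing the userspace model from the isolation sketch above:

/* Reuses struct folio_model from the isolation sketch. */
static void putback(struct folio_model *f)
{
        atomic_store(&f->lru, true);            /* folio_set_lru(), line 2394 */
        if (atomic_fetch_sub(&f->ref, 1) == 1) {
                /* ours was the last reference: undo the LRU flag and free
                 * (destroy_large_folio() for large folios, line 2401) */
                atomic_store(&f->lru, false);   /* __folio_clear_lru_flags() */
        } else {
                /* still referenced: back onto the list,
                 * lruvec_add_folio(), line 2414 */
        }
}
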
2595 struct folio *folio; in shrink_active_list() local
2598 folio = lru_to_folio(&l_hold); in shrink_active_list()
2599 list_del(&folio->lru); in shrink_active_list()
2601 if (unlikely(!folio_evictable(folio))) { in shrink_active_list()
2602 folio_putback_lru(folio); in shrink_active_list()
2607 if (folio_test_private(folio) && folio_trylock(folio)) { in shrink_active_list()
2608 if (folio_test_private(folio)) in shrink_active_list()
2609 filemap_release_folio(folio, 0); in shrink_active_list()
2610 folio_unlock(folio); in shrink_active_list()
2615 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2626 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) { in shrink_active_list()
2627 nr_rotated += folio_nr_pages(folio); in shrink_active_list()
2628 list_add(&folio->lru, &l_active); in shrink_active_list()
2633 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2634 folio_set_workingset(folio); in shrink_active_list()
2635 list_add(&folio->lru, &l_inactive); in shrink_active_list()
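
shrink_active_list() (lines 2595-2635) demotes folios from the active to the inactive list; only referenced executable file folios are rotated back to the active side. A small sketch of the per-folio verdict, with inputs standing in for the folio_referenced() result and the VMA flags it reports:

#include <stdbool.h>

enum aging { ROTATE_ACTIVE, DEACTIVATE };

static enum aging age_model(bool referenced, bool vm_exec, bool file)
{
        if (referenced && vm_exec && file)
                return ROTATE_ACTIVE;   /* protect hot executable text,
                                           lines 2626-2628 */
        /* folio_clear_active() + folio_set_workingset(),
         * lines 2633-2634 */
        return DEACTIVATE;
}
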
2665 struct folio *folio; in reclaim_folio_list() local
2676 folio = lru_to_folio(folio_list); in reclaim_folio_list()
2677 list_del(&folio->lru); in reclaim_folio_list()
2678 folio_putback_lru(folio); in reclaim_folio_list()
2698 struct folio *folio = lru_to_folio(folio_list); in reclaim_pages() local
2700 if (nid == folio_nid(folio)) { in reclaim_pages()
2701 folio_clear_active(folio); in reclaim_pages()
2702 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
3648 static int folio_update_gen(struct folio *folio, int gen) in folio_update_gen() argument
3650 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_update_gen()
3665 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_update_gen()
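
folio_update_gen() (lines 3648-3665) updates the MGLRU generation stored inside folio->flags without taking a lock, using a read/modify/compare-exchange retry loop. A runnable model with C11 atomics; the bit layout below is invented for illustration (the kernel packs the generation under LRU_GEN_MASK).

#include <stdatomic.h>

#define GEN_SHIFT 3UL
#define GEN_MASK  (0x7UL << GEN_SHIFT)  /* hypothetical 3-bit gen field */

/* Splices new_gen into the flags word and returns the old generation,
 * retrying until no concurrent flags writer intervenes (cf. the
 * try_cmpxchg loop at line 3665). */
static int update_gen(_Atomic unsigned long *flags, int new_gen)
{
        unsigned long old = atomic_load(flags);
        unsigned long new;

        do {
                new = (old & ~GEN_MASK) |
                      (((unsigned long)new_gen << GEN_SHIFT) & GEN_MASK);
        } while (!atomic_compare_exchange_weak(flags, &old, new));

        return (int)((old & GEN_MASK) >> GEN_SHIFT);
}
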
3671 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) in folio_inc_gen() argument
3673 int type = folio_is_file_lru(folio); in folio_inc_gen()
3676 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_gen()
3678 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); in folio_inc_gen()
3693 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_gen()
3695 lru_gen_update_size(lruvec, folio, old_gen, new_gen); in folio_inc_gen()
3700 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, in update_batch_size() argument
3703 int type = folio_is_file_lru(folio); in update_batch_size()
3704 int zone = folio_zonenum(folio); in update_batch_size()
3705 int delta = folio_nr_pages(folio); in update_batch_size()
3844 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, in get_pfn_folio()
3847 struct folio *folio; in get_pfn_folio() local
3853 folio = pfn_folio(pfn); in get_pfn_folio()
3854 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
3857 if (folio_memcg_rcu(folio) != memcg) in get_pfn_folio()
3861 if (!folio_is_file_lru(folio) && !can_swap) in get_pfn_folio()
3864 return folio; in get_pfn_folio()
3901 struct folio *folio; in walk_pte_range() local
3915 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pte_range()
3916 if (!folio) in walk_pte_range()
3925 if (pte_dirty(pte[i]) && !folio_test_dirty(folio) && in walk_pte_range()
3926 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && in walk_pte_range()
3927 !folio_test_swapcache(folio))) in walk_pte_range()
3928 folio_mark_dirty(folio); in walk_pte_range()
3930 old_gen = folio_update_gen(folio, new_gen); in walk_pte_range()
3932 update_batch_size(walk, folio, old_gen, new_gen); in walk_pte_range()
3982 struct folio *folio; in walk_pmd_range_locked() local
3996 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pmd_range_locked()
3997 if (!folio) in walk_pmd_range_locked()
4005 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) && in walk_pmd_range_locked()
4006 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && in walk_pmd_range_locked()
4007 !folio_test_swapcache(folio))) in walk_pmd_range_locked()
4008 folio_mark_dirty(folio); in walk_pmd_range_locked()
4010 old_gen = folio_update_gen(folio, new_gen); in walk_pmd_range_locked()
4012 update_batch_size(walk, folio, old_gen, new_gen); in walk_pmd_range_locked()
4247 struct folio *folio = lru_to_folio(head); in inc_min_seq() local
4249 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in inc_min_seq()
4250 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); in inc_min_seq()
4251 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in inc_min_seq()
4252 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); in inc_min_seq()
4254 new_gen = folio_inc_gen(lruvec, folio, false); in inc_min_seq()
4255 list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]); in inc_min_seq()
4599 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around() local
4600 struct mem_cgroup *memcg = folio_memcg(folio); in lru_gen_look_around()
4601 struct pglist_data *pgdat = folio_pgdat(folio); in lru_gen_look_around()
4607 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); in lru_gen_look_around()
4644 folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap); in lru_gen_look_around()
4645 if (!folio) in lru_gen_look_around()
4653 if (pte_dirty(pte[i]) && !folio_test_dirty(folio) && in lru_gen_look_around()
4654 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && in lru_gen_look_around()
4655 !folio_test_swapcache(folio))) in lru_gen_look_around()
4656 folio_mark_dirty(folio); in lru_gen_look_around()
4658 old_gen = folio_lru_gen(folio); in lru_gen_look_around()
4660 folio_set_referenced(folio); in lru_gen_look_around()
4674 folio = pfn_folio(pte_pfn(pte[i])); in lru_gen_look_around()
4675 folio_activate(folio); in lru_gen_look_around()
4690 folio = pfn_folio(pte_pfn(pte[i])); in lru_gen_look_around()
4691 if (folio_memcg_rcu(folio) != memcg) in lru_gen_look_around()
4694 old_gen = folio_update_gen(folio, new_gen); in lru_gen_look_around()
4699 update_batch_size(walk, folio, old_gen, new_gen); in lru_gen_look_around()
4701 lru_gen_update_size(lruvec, folio, old_gen, new_gen); in lru_gen_look_around()
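
lru_gen_look_around() (lines 4599-4701) exploits spatial locality: once one PTE proves young, the neighbouring PTEs in the same page table are scanned too, so nearby hot folios have their generation bumped in the same pass. A condensed shape of that scan, folding the function's two loops into one as a reading aid (not compilable outside the kernel):

for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
        if (!pte_young(pte[i]))
                continue;
        folio = get_pfn_folio(pte_pfn(pte[i]), memcg, pgdat, can_swap);
        if (!folio)                             /* line 4645 */
                continue;
        /* a dirty PTE over a clean, non-swapcache folio gets
         * folio_mark_dirty(), lines 4653-4656 */
        old_gen = folio_update_gen(folio, new_gen);       /* line 4694 */
        update_batch_size(walk, folio, old_gen, new_gen); /* line 4699 */
}
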
4714 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx) in sort_folio() argument
4717 int gen = folio_lru_gen(folio); in sort_folio()
4718 int type = folio_is_file_lru(folio); in sort_folio()
4719 int zone = folio_zonenum(folio); in sort_folio()
4720 int delta = folio_nr_pages(folio); in sort_folio()
4721 int refs = folio_lru_refs(folio); in sort_folio()
4725 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); in sort_folio()
4728 if (!folio_evictable(folio)) { in sort_folio()
4729 success = lru_gen_del_folio(lruvec, folio, true); in sort_folio()
4730 VM_WARN_ON_ONCE_FOLIO(!success, folio); in sort_folio()
4731 folio_set_unevictable(folio); in sort_folio()
4732 lruvec_add_folio(lruvec, folio); in sort_folio()
4738 if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) { in sort_folio()
4739 success = lru_gen_del_folio(lruvec, folio, true); in sort_folio()
4740 VM_WARN_ON_ONCE_FOLIO(!success, folio); in sort_folio()
4741 folio_set_swapbacked(folio); in sort_folio()
4742 lruvec_add_folio_tail(lruvec, folio); in sort_folio()
4748 list_move(&folio->lru, &lrugen->lists[gen][type][zone]); in sort_folio()
4756 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4757 list_move_tail(&folio->lru, &lrugen->lists[gen][type][zone]); in sort_folio()
4766 if (folio_test_locked(folio) || folio_test_writeback(folio) || in sort_folio()
4767 (type == LRU_GEN_FILE && folio_test_dirty(folio))) { in sort_folio()
4768 gen = folio_inc_gen(lruvec, folio, true); in sort_folio()
4769 list_move(&folio->lru, &lrugen->lists[gen][type][zone]); in sort_folio()
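
sort_folio() (lines 4714-4769) filters folios during an MGLRU scan: anything it can re-file in place is skipped by eviction. A compilable summary of the ladder, with inputs standing in for the folio tests visible above; returning true means the folio was dealt with in place.

#include <stdbool.h>

static bool sort_folio_model(bool evictable, bool dirty_lazyfree_anon,
                             bool already_promoted, bool protected_tier,
                             bool locked_dirty_or_writeback)
{
        if (!evictable)                         /* lines 4728-4732 */
                return true;    /* move to the unevictable list */
        if (dirty_lazyfree_anon)                /* lines 4738-4742 */
                return true;    /* re-mark swapbacked, queue at the tail */
        if (already_promoted)                   /* line 4748 */
                return true;    /* keep it in its newer generation */
        if (protected_tier)                     /* lines 4756-4757 */
                return true;    /* hot tier: bump the gen, protect it */
        if (locked_dirty_or_writeback)          /* lines 4766-4769 */
                return true;    /* untouchable right now: defer it */
        return false;           /* candidate for isolate_folio() */
}
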
4776 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) in isolate_folio() argument
4781 if (!sc->may_unmap && folio_mapped(folio)) in isolate_folio()
4786 (folio_test_dirty(folio) || in isolate_folio()
4787 (folio_test_anon(folio) && !folio_test_swapcache(folio)))) in isolate_folio()
4791 if (!folio_try_get(folio)) in isolate_folio()
4795 if (!folio_test_clear_lru(folio)) { in isolate_folio()
4796 folio_put(folio); in isolate_folio()
4801 if (!folio_test_referenced(folio)) in isolate_folio()
4802 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); in isolate_folio()
4805 folio_clear_reclaim(folio); in isolate_folio()
4806 folio_clear_referenced(folio); in isolate_folio()
4808 success = lru_gen_del_folio(lruvec, folio, true); in isolate_folio()
4809 VM_WARN_ON_ONCE_FOLIO(!success, folio); in isolate_folio()
4839 struct folio *folio = lru_to_folio(head); in scan_folios() local
4840 int delta = folio_nr_pages(folio); in scan_folios()
4842 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in scan_folios()
4843 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); in scan_folios()
4844 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in scan_folios()
4845 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); in scan_folios()
4849 if (sort_folio(lruvec, folio, tier)) in scan_folios()
4851 else if (isolate_folio(lruvec, folio, sc)) { in scan_folios()
4852 list_add(&folio->lru, list); in scan_folios()
4855 list_move(&folio->lru, &moved); in scan_folios()
4987 struct folio *folio; in evict_folios() local
4988 struct folio *next; in evict_folios()
5013 list_for_each_entry_safe_reverse(folio, next, &list, lru) { in evict_folios()
5014 if (!folio_evictable(folio)) { in evict_folios()
5015 list_del(&folio->lru); in evict_folios()
5016 folio_putback_lru(folio); in evict_folios()
5020 if (folio_test_reclaim(folio) && in evict_folios()
5021 (folio_test_dirty(folio) || folio_test_writeback(folio))) { in evict_folios()
5023 if (folio_test_workingset(folio)) in evict_folios()
5024 folio_set_referenced(folio); in evict_folios()
5028 if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || in evict_folios()
5029 folio_mapped(folio) || folio_test_locked(folio) || in evict_folios()
5030 folio_test_dirty(folio) || folio_test_writeback(folio)) { in evict_folios()
5032 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, in evict_folios()
5038 list_move(&folio->lru, &clean); in evict_folios()
5039 sc->nr_scanned -= folio_nr_pages(folio); in evict_folios()
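
The tail of evict_folios() (lines 5013-5039) decides which folios that shrink_folio_list() handed back deserve one retry: only those with no obvious reason to stay put (activity, references, mappings, lock, dirt, writeback). A sketch of that predicate; the inputs are stand-ins for the folio tests at lines 5028-5030.

#include <stdbool.h>

/* true = worth a second shrink_folio_list() pass (lines 5038-5039) */
static bool retry_worthy(bool skip_retry, bool active, bool referenced,
                         bool mapped, bool locked, bool dirty,
                         bool writeback)
{
        if (skip_retry || active || referenced || mapped || locked ||
            dirty || writeback)
                return false;   /* keep it; its LRU_REFS bits are reset
                                   instead (lines 5032-5033) */
        return true;
}
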
5256 struct folio *folio = lru_to_folio(head); in fill_evictable() local
5258 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in fill_evictable()
5259 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); in fill_evictable()
5260 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in fill_evictable()
5261 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
5263 lruvec_del_folio(lruvec, folio); in fill_evictable()
5264 success = lru_gen_add_folio(lruvec, folio, false); in fill_evictable()
5285 struct folio *folio = lru_to_folio(head); in drain_evictable() local
5287 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); in drain_evictable()
5288 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); in drain_evictable()
5289 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); in drain_evictable()
5290 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); in drain_evictable()
5292 success = lru_gen_del_folio(lruvec, folio, false); in drain_evictable()
5294 lruvec_add_folio(lruvec, folio); in drain_evictable()
7737 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios() local
7738 int nr_pages = folio_nr_pages(folio); in check_move_unevictable_folios()
7743 if (!folio_test_clear_lru(folio)) in check_move_unevictable_folios()
7746 lruvec = folio_lruvec_relock_irq(folio, lruvec); in check_move_unevictable_folios()
7747 if (folio_evictable(folio) && folio_test_unevictable(folio)) { in check_move_unevictable_folios()
7748 lruvec_del_folio(lruvec, folio); in check_move_unevictable_folios()
7749 folio_clear_unevictable(folio); in check_move_unevictable_folios()
7750 lruvec_add_folio(lruvec, folio); in check_move_unevictable_folios()
7753 folio_set_lru(folio); in check_move_unevictable_folios()
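
check_move_unevictable_folios() (lines 7737-7753) is the rescue path for folios that became evictable again (for example after munlock) while parked on the unevictable list. Its shape, condensed as a reading aid (not compilable outside the kernel):

for each folio in the batch {                   /* line 7737 */
        if (!folio_test_clear_lru(folio))       /* line 7743 */
                continue;                       /* someone else owns it */
        lruvec = folio_lruvec_relock_irq(folio, lruvec);
        if (folio_evictable(folio) && folio_test_unevictable(folio)) {
                lruvec_del_folio(lruvec, folio);   /* off unevictable */
                folio_clear_unevictable(folio);
                lruvec_add_folio(lruvec, folio);   /* onto a normal LRU */
                /* pgrescued += nr_pages */
        }
        folio_set_lru(folio);
}
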