Lines matching +full:oc +full:- +full:delay +full:- +full:us (a device tree property search, roughly "oc-delay-us"); the hits below all land in the kernel's mm/vmscan.c and are shown with their source line numbers and enclosing functions.

1 // SPDX-License-Identifier: GPL-2.0
31 #include <linux/backing-dev.h>
39 #include <linux/delay.h>
46 #include <linux/memory-tiers.h>
175 if ((_folio)->lru.prev != _base) { \
178 prev = lru_to_folio(&(_folio->lru)); \
179 prefetchw(&prev->_field); \
211 return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, in shrinker_info_protected()
226 pn = memcg->nodeinfo[nid]; in expand_one_shrinker_info()
233 if (new_nr_max <= old->map_nr_max) in expand_one_shrinker_info()
238 return -ENOMEM; in expand_one_shrinker_info()
240 new->nr_deferred = (atomic_long_t *)(new + 1); in expand_one_shrinker_info()
241 new->map = (void *)new->nr_deferred + defer_size; in expand_one_shrinker_info()
242 new->map_nr_max = new_nr_max; in expand_one_shrinker_info()
245 memset(new->map, (int)0xff, old_map_size); in expand_one_shrinker_info()
246 memset((void *)new->map + old_map_size, 0, map_size - old_map_size); in expand_one_shrinker_info()
248 memcpy(new->nr_deferred, old->nr_deferred, old_defer_size); in expand_one_shrinker_info()
249 memset((void *)new->nr_deferred + old_defer_size, 0, in expand_one_shrinker_info()
250 defer_size - old_defer_size); in expand_one_shrinker_info()
252 rcu_assign_pointer(pn->shrinker_info, new); in expand_one_shrinker_info()
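
For context, lines 240-241 above encode the single-allocation layout that makes this expansion cheap: the deferred counters and the id bitmap trail the struct in one buffer, so only the offsets need recomputing before the RCU pointer swap. A minimal userspace sketch of the same layout trick, with illustrative names rather than the kernel's types:

    #include <stdlib.h>

    /* Illustrative stand-in for struct shrinker_info: one allocation
     * carries the header, then the per-shrinker deferred counters,
     * then the id bitmap, in that order. */
    struct info {
            long *nr_deferred;      /* points into the same buffer */
            unsigned char *map;
            int map_nr_max;
    };

    static struct info *alloc_info(int nr_ids)
    {
            size_t defer_size = nr_ids * sizeof(long);
            size_t map_size = (nr_ids + 7) / 8;
            struct info *p = calloc(1, sizeof(*p) + defer_size + map_size);

            if (!p)
                    return NULL;
            p->nr_deferred = (long *)(p + 1);  /* just past the header */
            p->map = (unsigned char *)p->nr_deferred + defer_size;
            p->map_nr_max = nr_ids;
            return p;                          /* one free() tears it down */
    }
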
266 pn = memcg->nodeinfo[nid]; in free_shrinker_info()
267 info = rcu_dereference_protected(pn->shrinker_info, true); in free_shrinker_info()
269 rcu_assign_pointer(pn->shrinker_info, NULL); in free_shrinker_info()
287 ret = -ENOMEM; in alloc_shrinker_info()
290 info->nr_deferred = (atomic_long_t *)(info + 1); in alloc_shrinker_info()
291 info->map = (void *)info->nr_deferred + defer_size; in alloc_shrinker_info()
292 info->map_nr_max = shrinker_nr_max; in alloc_shrinker_info()
293 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); in alloc_shrinker_info()
341 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); in set_shrinker_bit()
342 if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) { in set_shrinker_bit()
345 set_bit(shrinker_id, info->map); in set_shrinker_bit()
355 int id, ret = -ENOMEM; in prealloc_memcg_shrinker()
358 return -ENOSYS; in prealloc_memcg_shrinker()
372 shrinker->id = id; in prealloc_memcg_shrinker()
381 int id = shrinker->id; in unregister_memcg_shrinker()
396 return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0); in xchg_nr_deferred_memcg()
405 return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]); in add_nr_deferred_memcg()
424 for (i = 0; i < child_info->map_nr_max; i++) { in reparent_shrinker_deferred()
425 nr = atomic_long_read(&child_info->nr_deferred[i]); in reparent_shrinker_deferred()
426 atomic_long_add(nr, &parent_info->nr_deferred[i]); in reparent_shrinker_deferred()
435 return sc->target_mem_cgroup; in cgroup_reclaim()
444 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); in root_reclaim()
448 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
473 return -ENOSYS; in prealloc_memcg_shrinker()
512 WARN_ON_ONCE(rs && task->reclaim_state); in set_task_reclaim_state()
514 /* Check for the nulling of an already-nulled member */ in set_task_reclaim_state()
515 WARN_ON_ONCE(!rs && !task->reclaim_state); in set_task_reclaim_state()
517 task->reclaim_state = rs; in set_task_reclaim_state()
521 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
522 * scan_control->nr_reclaimed.
527 * Currently, reclaim_state->reclaimed includes three types of pages in flush_reclaim_state()
534 * single memcg. For example, a memcg-aware shrinker can free one object in flush_reclaim_state()
537 * overestimating the reclaimed amount (potentially under-reclaiming). in flush_reclaim_state()
539 * Only count such pages for global reclaim to prevent under-reclaiming in flush_reclaim_state()
554 if (current->reclaim_state && root_reclaim(sc)) { in flush_reclaim_state()
555 sc->nr_reclaimed += current->reclaim_state->reclaimed; in flush_reclaim_state()
556 current->reclaim_state->reclaimed = 0; in flush_reclaim_state()
563 int nid = sc->nid; in xchg_nr_deferred()
565 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) in xchg_nr_deferred()
568 if (sc->memcg && in xchg_nr_deferred()
569 (shrinker->flags & SHRINKER_MEMCG_AWARE)) in xchg_nr_deferred()
571 sc->memcg); in xchg_nr_deferred()
573 return atomic_long_xchg(&shrinker->nr_deferred[nid], 0); in xchg_nr_deferred()
580 int nid = sc->nid; in add_nr_deferred()
582 if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) in add_nr_deferred()
585 if (sc->memcg && in add_nr_deferred()
586 (shrinker->flags & SHRINKER_MEMCG_AWARE)) in add_nr_deferred()
588 sc->memcg); in add_nr_deferred()
590 return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]); in add_nr_deferred()
597 if (sc && sc->no_demotion) in can_demote()
611 * For non-memcg reclaim, is there in can_reclaim_anon_pages()
649 * lruvec_lru_size - Returns the number of pages on the given LRU list.
652 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
661 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
682 if (shrinker->flags & SHRINKER_MEMCG_AWARE) { in __prealloc_shrinker()
684 if (err != -ENOSYS) in __prealloc_shrinker()
687 shrinker->flags &= ~SHRINKER_MEMCG_AWARE; in __prealloc_shrinker()
690 size = sizeof(*shrinker->nr_deferred); in __prealloc_shrinker()
691 if (shrinker->flags & SHRINKER_NUMA_AWARE) in __prealloc_shrinker()
694 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); in __prealloc_shrinker()
695 if (!shrinker->nr_deferred) in __prealloc_shrinker()
696 return -ENOMEM; in __prealloc_shrinker()
708 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); in prealloc_shrinker()
710 if (!shrinker->name) in prealloc_shrinker()
711 return -ENOMEM; in prealloc_shrinker()
715 kfree_const(shrinker->name); in prealloc_shrinker()
716 shrinker->name = NULL; in prealloc_shrinker()
731 kfree_const(shrinker->name); in free_prealloced_shrinker()
732 shrinker->name = NULL; in free_prealloced_shrinker()
734 if (shrinker->flags & SHRINKER_MEMCG_AWARE) { in free_prealloced_shrinker()
741 kfree(shrinker->nr_deferred); in free_prealloced_shrinker()
742 shrinker->nr_deferred = NULL; in free_prealloced_shrinker()
748 list_add_tail(&shrinker->list, &shrinker_list); in register_shrinker_prepared()
749 shrinker->flags |= SHRINKER_REGISTERED; in register_shrinker_prepared()
771 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); in register_shrinker()
773 if (!shrinker->name) in register_shrinker()
774 return -ENOMEM; in register_shrinker()
778 kfree_const(shrinker->name); in register_shrinker()
779 shrinker->name = NULL; in register_shrinker()
799 if (!(shrinker->flags & SHRINKER_REGISTERED)) in unregister_shrinker()
803 list_del(&shrinker->list); in unregister_shrinker()
804 shrinker->flags &= ~SHRINKER_REGISTERED; in unregister_shrinker()
805 if (shrinker->flags & SHRINKER_MEMCG_AWARE) in unregister_shrinker()
812 kfree(shrinker->nr_deferred); in unregister_shrinker()
813 shrinker->nr_deferred = NULL; in unregister_shrinker()
818 * synchronize_shrinkers - Wait for all running shrinkers to complete.
843 long batch_size = shrinker->batch ? shrinker->batch in do_shrink_slab()
847 freeable = shrinker->count_objects(shrinker, shrinkctl); in do_shrink_slab()
858 if (shrinker->seeks) { in do_shrink_slab()
861 do_div(delta, shrinker->seeks); in do_shrink_slab()
898 shrinkctl->nr_to_scan = nr_to_scan; in do_shrink_slab()
899 shrinkctl->nr_scanned = nr_to_scan; in do_shrink_slab()
900 ret = shrinker->scan_objects(shrinker, shrinkctl); in do_shrink_slab()
905 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned); in do_shrink_slab()
906 total_scan -= shrinkctl->nr_scanned; in do_shrink_slab()
907 scanned += shrinkctl->nr_scanned; in do_shrink_slab()
918 next_deferred = max_t(long, (nr + delta - scanned), 0); in do_shrink_slab()
927 trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan); in do_shrink_slab()
949 for_each_set_bit(i, info->map, info->map_nr_max) { in shrink_slab_memcg()
958 if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) { in shrink_slab_memcg()
960 clear_bit(i, info->map); in shrink_slab_memcg()
964 /* Call non-slab shrinkers even though kmem is disabled */ in shrink_slab_memcg()
966 !(shrinker->flags & SHRINKER_NONSLAB)) in shrink_slab_memcg()
971 clear_bit(i, info->map); in shrink_slab_memcg()
1014 * shrink_slab - shrink slab caches
1028 * @priority is sc->priority, we take the number of objects and >> by priority
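
A worked example of the ">> by priority" scaling referenced here, using the delta computation partially visible at lines 858-861 (delta = (freeable >> priority) * 4 / shrinker->seeks in current kernels) and assuming the usual constants DEF_PRIORITY == 12 and DEFAULT_SEEKS == 2: with freeable == 10000 objects, a relaxed scan at priority 12 yields delta = (10000 >> 12) * 4 / 2 = 4 objects, while a desperate scan at priority 1 yields (10000 >> 1) * 4 / 2 = 10000, effectively the whole cache. Each drop in priority roughly doubles the slab scan pressure, mirroring what happens to the LRU lists.
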
1113 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
1114 PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); in reclaimer_offset()
1115 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
1116 PGSCAN_DIRECT - PGSCAN_KSWAPD); in reclaimer_offset()
1117 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
1118 PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); in reclaimer_offset()
1119 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
1120 PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); in reclaimer_offset()
1125 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; in reclaimer_offset()
1126 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; in reclaimer_offset()
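
The BUILD_BUG_ONs above pin the PGSTEAL/PGSCAN/PGDEMOTE event families into lockstep so that a single computed offset can index all of them. A hedged sketch of the same pattern, with made-up enum names:

    /* If every family lists its members in the same order, one offset
     * (here DIRECT - KSWAPD) selects the matching member in each. */
    enum { STEAL_KSWAPD, STEAL_DIRECT, STEAL_KHUGEPAGED };
    enum { SCAN_KSWAPD, SCAN_DIRECT, SCAN_KHUGEPAGED };

    _Static_assert(STEAL_DIRECT - STEAL_KSWAPD == SCAN_DIRECT - SCAN_KSWAPD,
                   "event families must stay in lockstep");

    static void count_reclaim(unsigned long *events, int offset,
                              unsigned long stolen, unsigned long scanned)
    {
            /* the same offset works for both families */
            events[STEAL_KSWAPD + offset] += stolen;
            events[SCAN_KSWAPD + offset] += scanned;
    }
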
1134 * private data at folio->private. in is_page_cache_freeable()
1136 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
1142 * -ENOSPC. We need to propagate that into the address_space for a subsequent
1170 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
1179 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
1196 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
1206 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { in reclaim_throttle()
1214 * parallel reclaimers which is a short-lived event so the timeout is in reclaim_throttle()
1216 * potentially long-lived events so use a longer timeout. This is shaky in reclaim_throttle()
1225 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
1226 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
1256 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
1258 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
1259 jiffies_to_usecs(timeout - ret), in reclaim_throttle()
1276 * This is an inaccurate read as the per-cpu deltas may not in __acct_reclaim_writeback()
1282 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
1283 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
1286 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
1303 * Calls ->writepage().
1310 * will be non-blocking. To prevent this allocation from being in pageout()
1329 * folio->mapping == NULL while being dirty with clean buffers. in pageout()
1340 if (mapping->a_ops->writepage == NULL) in pageout()
1355 res = mapping->a_ops->writepage(&folio->page, &wbc); in pageout()
1389 spin_lock(&mapping->host->i_lock); in __remove_mapping()
1390 xa_lock_irq(&mapping->i_pages); in __remove_mapping()
1410 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags in __remove_mapping()
1411 * load is not satisfied before that of folio->_refcount. in __remove_mapping()
1426 swp_entry_t swap = folio->swap; in __remove_mapping()
1432 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
1437 free_folio = mapping->a_ops->free_folio; in __remove_mapping()
1458 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
1460 inode_add_lru(mapping->host); in __remove_mapping()
1461 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
1470 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
1472 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
1477 * remove_mapping() - Attempt to remove a folio from its mapping.
1493 * drops the pagecache ref for us without requiring another in remove_mapping()
1503 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
1530 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
1542 if (referenced_ptes == -1) in folio_check_references()
1566 * Activate file-backed executable folios after first usage. in folio_check_references()
1610 if (mapping && mapping->a_ops->is_dirty_writeback) in folio_check_dirty_writeback()
1611 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
1623 allowed_mask = mtc->nmask; in alloc_demote_folio()
1633 mtc->nmask = NULL; in alloc_demote_folio()
1634 mtc->gfp_mask |= __GFP_THISNODE; in alloc_demote_folio()
1639 mtc->gfp_mask &= ~__GFP_THISNODE; in alloc_demote_folio()
1640 mtc->nmask = allowed_mask; in alloc_demote_folio()
1652 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
1693 * We can "enter_fs" for swap-cache with only __GFP_IO in may_enter_fs()
1695 * ->flags can be updated non-atomically (scan_swap_map_slots), in may_enter_fs()
1719 do_demote_pass = can_demote(pgdat->node_id, sc); in shrink_folio_list()
1732 list_del(&folio->lru); in shrink_folio_list()
1742 sc->nr_scanned += nr_pages; in shrink_folio_list()
1747 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1762 stat->nr_dirty += nr_pages; in shrink_folio_list()
1765 stat->nr_unqueued_dirty += nr_pages; in shrink_folio_list()
1774 stat->nr_congested += nr_pages; in shrink_folio_list()
1824 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1825 stat->nr_immediate += nr_pages; in shrink_folio_list()
1831 !may_enter_fs(folio, sc->gfp_mask)) { in shrink_folio_list()
1833 * This is slightly racy - in shrink_folio_list()
1837 * interpreted as the readahead flag - but in shrink_folio_list()
1847 stat->nr_writeback += nr_pages; in shrink_folio_list()
1855 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1867 stat->nr_ref_keep += nr_pages; in shrink_folio_list()
1880 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1892 if (!(sc->gfp_mask & __GFP_IO)) in shrink_folio_list()
1937 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1954 stat->nr_unmap_fail += nr_pages; in shrink_folio_list()
1957 stat->nr_lazyfree_fail += nr_pages; in shrink_folio_list()
1977 * injecting inefficient single-folio I/O into in shrink_folio_list()
1988 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
2004 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
2006 if (!sc->may_writepage) in shrink_folio_list()
2021 stat->nr_pageout += nr_pages; in shrink_folio_list()
2029 * A synchronous write - probably a ramdisk. Go in shrink_folio_list()
2057 * and mark the folio clean - it can be freed. in shrink_folio_list()
2059 * Rarely, folios can have buffers and no ->mapping. in shrink_folio_list()
2068 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
2103 sc->target_mem_cgroup)) in shrink_folio_list()
2121 list_add(&folio->lru, &free_folios); in shrink_folio_list()
2130 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
2142 stat->nr_activate[type] += nr_pages; in shrink_folio_list()
2148 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
2177 if (!sc->proactive) { in shrink_folio_list()
2183 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; in shrink_folio_list()
2215 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
2226 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
2231 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
2232 -(long)nr_reclaimed); in reclaim_clean_pages_from_list()
2239 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, in reclaim_clean_pages_from_list()
2241 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
2242 -(long)stat.nr_lazyfree_fail); in reclaim_clean_pages_from_list()
2259 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
2268 * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
2273 gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && in skip_cma()
2274 get_pageblock_migratetype(&folio->page) == MIGRATE_CMA; in skip_cma()
2286 * lruvec->lru_lock is heavily contended. Some of the functions that
2309 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
2329 if (folio_zonenum(folio) > sc->reclaim_idx || in isolate_lru_folios()
2347 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
2352 * sure the folio is not being freed elsewhere -- the in isolate_lru_folios()
2368 list_move(&folio->lru, move_to); in isolate_lru_folios()
2391 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_folios()
2393 sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru); in isolate_lru_folios()
2399 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
2469 * won't get blocked by normal direct-reclaimers, forming a circular in too_many_isolated()
2472 if (gfp_has_io_fs(sc->gfp_mask)) in too_many_isolated()
2500 list_del(&folio->lru); in move_folios_to_lru()
2502 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2504 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2516 * list_add(&folio->lru,) in move_folios_to_lru()
2517 * list_add(&folio->lru,) in move_folios_to_lru()
2525 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2527 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2529 list_add(&folio->lru, &folios_to_free); in move_folios_to_lru()
2555 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
2561 return !(current->flags & PF_LOCAL_THROTTLE); in current_may_throttle()
2597 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2609 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2616 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2619 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2625 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2627 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); in shrink_inactive_list()
2657 sc->nr.dirty += stat.nr_dirty; in shrink_inactive_list()
2658 sc->nr.congested += stat.nr_congested; in shrink_inactive_list()
2659 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in shrink_inactive_list()
2660 sc->nr.writeback += stat.nr_writeback; in shrink_inactive_list()
2661 sc->nr.immediate += stat.nr_immediate; in shrink_inactive_list()
2662 sc->nr.taken += nr_taken; in shrink_inactive_list()
2664 sc->nr.file_taken += nr_taken; in shrink_inactive_list()
2666 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
2667 nr_scanned, nr_reclaimed, &stat, sc->priority, file); in shrink_inactive_list()
2682 * It is safe to rely on the active flag against the non-LRU folios in here
2683 * because nobody will play with that bit on a non-LRU folio.
2685 * The downside is that we have to touch folio->_refcount against each folio.
2686 * But we had to alter folio->flags anyway.
2706 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2717 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2724 list_del(&folio->lru); in shrink_active_list()
2740 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2743 * Identify referenced, file-backed active folios and in shrink_active_list()
2747 * are not likely to be evicted by use-once streaming in shrink_active_list()
2753 list_add(&folio->lru, &l_active); in shrink_active_list()
2758 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2760 list_add(&folio->lru, &l_inactive); in shrink_active_list()
2766 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2776 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2777 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2783 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2784 nr_deactivate, nr_rotated, sc->priority, file); in shrink_active_list()
2804 list_del(&folio->lru); in reclaim_folio_list()
2829 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
2848 if (sc->may_deactivate & (1 << is_file_lru(lru))) in shrink_list()
2851 sc->skipped_deactivate = 1; in shrink_list()
2863 * to the established workingset on the scan-resistant active list,
2896 gb = (inactive + active) >> (30 - PAGE_SHIFT); in inactive_is_low()
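
Line 2896 converts the combined LRU size from pages to gigabytes (with 4 KiB pages PAGE_SHIFT == 12, so the shift is 18 and 2^18 pages == 1 GiB). In current kernels the surrounding code, not captured by this match, derives inactive_ratio = int_sqrt(10 * gb) (or 1 below a gigabyte) and reports the inactive list as low when inactive * inactive_ratio < active. Worked example: 4 GiB of file pages gives gb = 4 and inactive_ratio = int_sqrt(40) = 6, so deactivation kicks in once the inactive list drops below roughly 1/7 of the file LRU.
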
2920 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_count()
2923 * Flush the memory cgroup stats, so that we read accurate per-memcg in prepare_scan_count()
2931 spin_lock_irq(&target_lruvec->lru_lock); in prepare_scan_count()
2932 sc->anon_cost = target_lruvec->anon_cost; in prepare_scan_count()
2933 sc->file_cost = target_lruvec->file_cost; in prepare_scan_count()
2934 spin_unlock_irq(&target_lruvec->lru_lock); in prepare_scan_count()
2940 if (!sc->force_deactivate) { in prepare_scan_count()
2950 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || in prepare_scan_count()
2952 sc->may_deactivate |= DEACTIVATE_ANON; in prepare_scan_count()
2954 sc->may_deactivate &= ~DEACTIVATE_ANON; in prepare_scan_count()
2958 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || in prepare_scan_count()
2960 sc->may_deactivate |= DEACTIVATE_FILE; in prepare_scan_count()
2962 sc->may_deactivate &= ~DEACTIVATE_FILE; in prepare_scan_count()
2964 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; in prepare_scan_count()
2972 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) in prepare_scan_count()
2973 sc->cache_trim_mode = 1; in prepare_scan_count()
2975 sc->cache_trim_mode = 0; in prepare_scan_count()
2991 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_count()
2996 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_count()
3011 sc->file_is_tiny = in prepare_scan_count()
3013 !(sc->may_deactivate & DEACTIVATE_ANON) && in prepare_scan_count()
3014 anon >> sc->priority; in prepare_scan_count()
3039 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
3061 if (!sc->priority && swappiness) { in get_scan_count()
3067 * If the system is almost out of file pages, force-scan anon. in get_scan_count()
3069 if (sc->file_is_tiny) { in get_scan_count()
3078 if (sc->cache_trim_mode) { in get_scan_count()
3099 total_cost = sc->anon_cost + sc->file_cost; in get_scan_count()
3100 anon_cost = total_cost + sc->anon_cost; in get_scan_count()
3101 file_cost = total_cost + sc->file_cost; in get_scan_count()
3107 fp = (200 - swappiness) * (total_cost + 1); in get_scan_count()
3120 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
3121 mem_cgroup_protection(sc->target_mem_cgroup, memcg, in get_scan_count()
3131 * becomes extremely binary -- from nothing as we in get_scan_count()
3146 * the best-effort low protection. However, we still in get_scan_count()
3147 * ideally want to honor how well-behaved groups are in in get_scan_count()
3158 if (!sc->memcg_low_reclaim && low > min) { in get_scan_count()
3160 sc->memcg_low_skipped = 1; in get_scan_count()
3168 scan = lruvec_size - lruvec_size * protection / in get_scan_count()
3174 * sc->priority further than desirable. in get_scan_count()
3181 scan >>= sc->priority; in get_scan_count()
3200 * round-off error. in get_scan_count()
3234 return can_demote(pgdat->node_id, sc); in can_age_anon_pages()
3264 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
3268 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
3269 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
3286 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec()
3289 if (!lruvec->pgdat) in get_lruvec()
3290 lruvec->pgdat = pgdat; in get_lruvec()
3297 return &pgdat->__lruvec; in get_lruvec()
3305 if (!sc->may_swap) in get_swappiness()
3308 if (!can_demote(pgdat->node_id, sc) && in get_swappiness()
3317 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
3339 * To get rid of non-leaf entries that no longer have enough leaf entries, the
3340 * aging uses the double-buffering technique to flip to the other filter each
3341 * time it produces a new generation. For non-leaf entries that have enough
3367 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); in get_item_key()
3377 filter = READ_ONCE(lruvec->mm_state.filters[gen]); in test_bloom_filter()
3392 filter = READ_ONCE(lruvec->mm_state.filters[gen]); in update_bloom_filter()
3409 filter = lruvec->mm_state.filters[gen]; in reset_bloom_filter()
3417 WRITE_ONCE(lruvec->mm_state.filters[gen], filter); in reset_bloom_filter()
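
The fragments at lines 3367-3417 show the MGLRU Bloom filter plumbing: one hash is split into two bit indices, and two filters alternate per generation (the double buffering described at lines 3339-3341). A self-contained userspace sketch of that structure, with illustrative sizes and names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define SHIFT 15                     /* 2^15 bits per filter */
    #define NBITS (1u << SHIFT)

    static uint64_t filters[2][NBITS / 64];

    /* Split one hash into two indices, like get_item_key() above. */
    static void get_keys(uint32_t hash, uint32_t key[2])
    {
            key[0] = hash & (NBITS - 1);
            key[1] = (hash >> SHIFT) & (NBITS - 1);
    }

    static void filter_add(int gen, uint32_t hash)
    {
            uint32_t key[2];

            get_keys(hash, key);
            filters[gen][key[0] / 64] |= 1ull << (key[0] % 64);
            filters[gen][key[1] / 64] |= 1ull << (key[1] % 64);
    }

    static bool filter_test(int gen, uint32_t hash)
    {
            uint32_t key[2];

            get_keys(hash, key);
            return (filters[gen][key[0] / 64] >> (key[0] % 64) & 1) &&
                   (filters[gen][key[1] / 64] >> (key[1] % 64) & 1);
    }

    /* Double buffering: each new generation flips to the other filter
     * and clears it, so stale entries age out after two generations. */
    static void filter_reset(int gen)
    {
            memset(filters[gen], 0, sizeof(filters[gen]));
    }
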
3433 return &memcg->mm_list; in get_mm_list()
3446 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); in lru_gen_add_mm()
3448 VM_WARN_ON_ONCE(mm->lru_gen.memcg); in lru_gen_add_mm()
3449 mm->lru_gen.memcg = memcg; in lru_gen_add_mm()
3451 spin_lock(&mm_list->lock); in lru_gen_add_mm()
3457 if (lruvec->mm_state.tail == &mm_list->fifo) in lru_gen_add_mm()
3458 lruvec->mm_state.tail = &mm->lru_gen.list; in lru_gen_add_mm()
3461 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); in lru_gen_add_mm()
3463 spin_unlock(&mm_list->lock); in lru_gen_add_mm()
3472 if (list_empty(&mm->lru_gen.list)) in lru_gen_del_mm()
3476 memcg = mm->lru_gen.memcg; in lru_gen_del_mm()
3480 spin_lock(&mm_list->lock); in lru_gen_del_mm()
3486 if (lruvec->mm_state.head == &mm->lru_gen.list) in lru_gen_del_mm()
3487 lruvec->mm_state.head = lruvec->mm_state.head->prev; in lru_gen_del_mm()
3490 if (lruvec->mm_state.tail == &mm->lru_gen.list) in lru_gen_del_mm()
3491 lruvec->mm_state.tail = lruvec->mm_state.tail->next; in lru_gen_del_mm()
3494 list_del_init(&mm->lru_gen.list); in lru_gen_del_mm()
3496 spin_unlock(&mm_list->lock); in lru_gen_del_mm()
3499 mem_cgroup_put(mm->lru_gen.memcg); in lru_gen_del_mm()
3500 mm->lru_gen.memcg = NULL; in lru_gen_del_mm()
3508 struct task_struct *task = rcu_dereference_protected(mm->owner, true); in lru_gen_migrate_mm()
3510 VM_WARN_ON_ONCE(task->mm != mm); in lru_gen_migrate_mm()
3511 lockdep_assert_held(&task->alloc_lock); in lru_gen_migrate_mm()
3518 if (!mm->lru_gen.memcg) in lru_gen_migrate_mm()
3524 if (memcg == mm->lru_gen.memcg) in lru_gen_migrate_mm()
3527 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); in lru_gen_migrate_mm()
3539 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
3542 hist = lru_hist_from_seq(walk->max_seq); in reset_mm_stats()
3545 WRITE_ONCE(lruvec->mm_state.stats[hist][i], in reset_mm_stats()
3546 lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]); in reset_mm_stats()
3547 walk->mm_stats[i] = 0; in reset_mm_stats()
3552 hist = lru_hist_from_seq(lruvec->mm_state.seq + 1); in reset_mm_stats()
3555 WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0); in reset_mm_stats()
3563 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in should_skip_mm()
3564 int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in should_skip_mm()
3566 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) in should_skip_mm()
3569 clear_bit(key, &mm->lru_gen.bitmap); in should_skip_mm()
3571 for (type = !walk->can_swap; type < ANON_AND_FILE; type++) { in should_skip_mm()
3591 struct lru_gen_mm_state *mm_state = &lruvec->mm_state; in iterate_mm_list()
3594 * mm_state->seq is incremented after each iteration of mm_list. There in iterate_mm_list()
3603 spin_lock(&mm_list->lock); in iterate_mm_list()
3605 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); in iterate_mm_list()
3607 if (walk->max_seq <= mm_state->seq) in iterate_mm_list()
3610 if (!mm_state->head) in iterate_mm_list()
3611 mm_state->head = &mm_list->fifo; in iterate_mm_list()
3613 if (mm_state->head == &mm_list->fifo) in iterate_mm_list()
3617 mm_state->head = mm_state->head->next; in iterate_mm_list()
3618 if (mm_state->head == &mm_list->fifo) { in iterate_mm_list()
3619 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list()
3625 if (!mm_state->tail || mm_state->tail == mm_state->head) { in iterate_mm_list()
3626 mm_state->tail = mm_state->head->next; in iterate_mm_list()
3627 walk->force_scan = true; in iterate_mm_list()
3630 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); in iterate_mm_list()
3638 spin_unlock(&mm_list->lock); in iterate_mm_list()
3641 reset_bloom_filter(lruvec, walk->max_seq + 1); in iterate_mm_list()
3656 struct lru_gen_mm_state *mm_state = &lruvec->mm_state; in iterate_mm_list_nowalk()
3658 spin_lock(&mm_list->lock); in iterate_mm_list_nowalk()
3660 VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); in iterate_mm_list_nowalk()
3662 if (max_seq > mm_state->seq) { in iterate_mm_list_nowalk()
3663 mm_state->head = NULL; in iterate_mm_list_nowalk()
3664 mm_state->tail = NULL; in iterate_mm_list_nowalk()
3665 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list_nowalk()
3670 spin_unlock(&mm_list->lock); in iterate_mm_list_nowalk()
3680 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3695 * 1. The D term may discount the other two terms over time so that long-lived
3707 struct lru_gen_folio *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3708 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in read_ctrl_pos()
3710 pos->refaulted = lrugen->avg_refaulted[type][tier] + in read_ctrl_pos()
3711 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in read_ctrl_pos()
3712 pos->total = lrugen->avg_total[type][tier] + in read_ctrl_pos()
3713 atomic_long_read(&lrugen->evicted[hist][type][tier]); in read_ctrl_pos()
3715 pos->total += lrugen->protected[hist][type][tier - 1]; in read_ctrl_pos()
3716 pos->gain = gain; in read_ctrl_pos()
3722 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3724 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; in reset_ctrl_pos()
3726 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3737 sum = lrugen->avg_refaulted[type][tier] + in reset_ctrl_pos()
3738 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in reset_ctrl_pos()
3739 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); in reset_ctrl_pos()
3741 sum = lrugen->avg_total[type][tier] + in reset_ctrl_pos()
3742 atomic_long_read(&lrugen->evicted[hist][type][tier]); in reset_ctrl_pos()
3744 sum += lrugen->protected[hist][type][tier - 1]; in reset_ctrl_pos()
3745 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); in reset_ctrl_pos()
3749 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); in reset_ctrl_pos()
3750 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); in reset_ctrl_pos()
3752 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); in reset_ctrl_pos()
3763 return pv->refaulted < MIN_LRU_BATCH || in positive_ctrl_err()
3764 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= in positive_ctrl_err()
3765 (sp->refaulted + 1) * pv->total * pv->gain; in positive_ctrl_err()
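
Rearranged, the division-free test at lines 3763-3765 compares gain-weighted refault rates:

    pv->refaulted / (pv->total * pv->gain) <=
            (sp->refaulted + 1) / ((sp->total + MIN_LRU_BATCH) * sp->gain)

The +1 and +MIN_LRU_BATCH terms damp small samples, and the first clause (pv->refaulted < MIN_LRU_BATCH) discards measurements too small to be statistically meaningful. Cross-multiplying keeps everything in integer arithmetic.
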
3775 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_update_gen()
3790 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_update_gen()
3792 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_update_gen()
3799 struct lru_gen_folio *lrugen = &lruvec->lrugen; in folio_inc_gen()
3800 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in folio_inc_gen()
3801 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_gen()
3806 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_inc_gen()
3818 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_gen()
3835 walk->batched++; in update_batch_size()
3837 walk->nr_pages[old_gen][type][zone] -= delta; in update_batch_size()
3838 walk->nr_pages[new_gen][type][zone] += delta; in update_batch_size()
3844 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_batch_size()
3846 walk->batched = 0; in reset_batch_size()
3850 int delta = walk->nr_pages[gen][type][zone]; in reset_batch_size()
3855 walk->nr_pages[gen][type][zone] = 0; in reset_batch_size()
3856 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], in reset_batch_size()
3857 lrugen->nr_pages[gen][type][zone] + delta); in reset_batch_size()
3868 struct vm_area_struct *vma = args->vma; in should_skip_vma()
3869 struct lru_gen_mm_walk *walk = args->private; in should_skip_vma()
3880 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) in should_skip_vma()
3883 if (vma == get_gate_vma(vma->vm_mm)) in should_skip_vma()
3887 return !walk->can_swap; in should_skip_vma()
3889 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) in should_skip_vma()
3892 mapping = vma->vm_file->f_mapping; in should_skip_vma()
3897 return !walk->can_swap; in should_skip_vma()
3900 return !mapping->a_ops->read_folio; in should_skip_vma()
3904 * Some userspace memory allocators map many single-page VMAs. Instead of
3913 VMA_ITERATOR(vmi, args->mm, start); in get_next_vma()
3918 for_each_vma(vmi, args->vma) { in get_next_vma()
3919 if (end && end <= args->vma->vm_start) in get_next_vma()
3922 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) in get_next_vma()
3925 *vm_start = max(start, args->vma->vm_start); in get_next_vma()
3926 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; in get_next_vma()
3938 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pte_pfn()
3941 return -1; in get_pte_pfn()
3944 return -1; in get_pte_pfn()
3947 return -1; in get_pte_pfn()
3957 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pmd_pfn()
3960 return -1; in get_pmd_pfn()
3963 return -1; in get_pmd_pfn()
3966 return -1; in get_pmd_pfn()
3978 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pfn_folio()
3982 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
4012 struct lru_gen_mm_walk *walk = args->private; in walk_pte_range()
4013 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
4014 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
4015 int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); in walk_pte_range()
4017 pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); in walk_pte_range()
4033 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pte_range()
4035 pfn = get_pte_pfn(ptent, args->vma, addr); in walk_pte_range()
4036 if (pfn == -1) in walk_pte_range()
4040 walk->mm_stats[MM_LEAF_OLD]++; in walk_pte_range()
4044 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pte_range()
4048 if (!ptep_test_and_clear_young(args->vma, addr, pte + i)) in walk_pte_range()
4052 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pte_range()
4080 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range_locked()
4081 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
4082 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
4083 int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); in walk_pmd_range_locked()
4088 if (*first == -1) { in walk_pmd_range_locked()
4094 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); in walk_pmd_range_locked()
4096 __set_bit(i - 1, bitmap); in walk_pmd_range_locked()
4102 ptl = pmd_lockptr(args->mm, pmd); in walk_pmd_range_locked()
4116 if (pfn == -1) in walk_pmd_range_locked()
4125 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pmd_range_locked()
4132 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pmd_range_locked()
4149 *first = -1; in walk_pmd_range_locked()
4167 unsigned long first = -1; in walk_pmd_range()
4168 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range()
4180 vma = args->vma; in walk_pmd_range()
4187 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
4194 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
4196 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
4199 walk->mm_stats[MM_LEAF_OLD]++; in walk_pmd_range()
4204 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in walk_pmd_range()
4211 walk->mm_stats[MM_NONLEAF_TOTAL]++; in walk_pmd_range()
4220 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) in walk_pmd_range()
4223 walk->mm_stats[MM_NONLEAF_FOUND]++; in walk_pmd_range()
4228 walk->mm_stats[MM_NONLEAF_ADDED]++; in walk_pmd_range()
4231 update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i); in walk_pmd_range()
4234 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); in walk_pmd_range()
4247 struct lru_gen_mm_walk *walk = args->private; in walk_pud_range()
4263 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { in walk_pud_range()
4274 if (!end || !args->vma) in walk_pud_range()
4277 walk->next_addr = max(end, args->vma->vm_start); in walk_pud_range()
4279 return -EAGAIN; in walk_pud_range()
4293 walk->next_addr = FIRST_USER_ADDRESS; in walk_mm()
4298 err = -EBUSY; in walk_mm()
4301 if (walk->max_seq != max_seq) in walk_mm()
4310 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); in walk_mm()
4317 if (walk->batched) { in walk_mm()
4318 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
4320 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
4324 } while (err == -EAGAIN); in walk_mm()
4329 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in set_mm_walk()
4334 walk = &pgdat->mm_walk; in set_mm_walk()
4341 current->reclaim_state->mm_walk = walk; in set_mm_walk()
4348 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in clear_mm_walk()
4350 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); in clear_mm_walk()
4351 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); in clear_mm_walk()
4353 current->reclaim_state->mm_walk = NULL; in clear_mm_walk()
4363 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_min_seq()
4364 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in inc_min_seq()
4371 struct list_head *head = &lrugen->folios[old_gen][type][zone]; in inc_min_seq()
4382 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
4384 if (!--remaining) in inc_min_seq()
4390 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); in inc_min_seq()
4399 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
4406 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { in try_to_inc_min_seq()
4410 if (!list_empty(&lrugen->folios[gen][type][zone])) in try_to_inc_min_seq()
4423 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); in try_to_inc_min_seq()
4427 if (min_seq[type] == lrugen->min_seq[type]) in try_to_inc_min_seq()
4431 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); in try_to_inc_min_seq()
4442 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_max_seq()
4444 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
4448 for (type = ANON_AND_FILE - 1; type >= 0; type--) { in inc_max_seq()
4457 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4468 prev = lru_gen_from_seq(lrugen->max_seq - 1); in inc_max_seq()
4469 next = lru_gen_from_seq(lrugen->max_seq + 1); in inc_max_seq()
4474 long delta = lrugen->nr_pages[prev][type][zone] - in inc_max_seq()
4475 lrugen->nr_pages[next][type][zone]; in inc_max_seq()
4481 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
4488 WRITE_ONCE(lrugen->timestamps[next], jiffies); in inc_max_seq()
4490 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); in inc_max_seq()
4492 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4501 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
4503 VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq)); in try_to_inc_max_seq()
4506 if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) { in try_to_inc_max_seq()
4528 walk->lruvec = lruvec; in try_to_inc_max_seq()
4529 walk->max_seq = max_seq; in try_to_inc_max_seq()
4530 walk->can_swap = can_swap; in try_to_inc_max_seq()
4531 walk->force_scan = force_scan; in try_to_inc_max_seq()
4554 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lruvec_is_sizable()
4566 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lruvec_is_sizable()
4571 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total; in lruvec_is_sizable()
4584 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lruvec_is_reclaimable()
4607 /* check the order to exclude compaction-induced reclaim */ in lru_gen_age_node()
4608 if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY) in lru_gen_age_node()
4629 struct oom_control oc = { in lru_gen_age_node() local
4630 .gfp_mask = sc->gfp_mask, in lru_gen_age_node()
4633 out_of_memory(&oc); in lru_gen_age_node()
4657 pte_t *pte = pvmw->pte; in lru_gen_look_around()
4658 unsigned long addr = pvmw->address; in lru_gen_look_around()
4659 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around()
4667 lockdep_assert_held(pvmw->ptl); in lru_gen_look_around()
4670 if (spin_is_contended(pvmw->ptl)) in lru_gen_look_around()
4674 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; in lru_gen_look_around()
4676 start = max(addr & PMD_MASK, pvmw->vma->vm_start); in lru_gen_look_around()
4677 end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1; in lru_gen_look_around()
4679 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { in lru_gen_look_around()
4680 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4682 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4683 start = end - MIN_LRU_BATCH * PAGE_SIZE; in lru_gen_look_around()
4685 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; in lru_gen_look_around()
4696 pte -= (addr - start) / PAGE_SIZE; in lru_gen_look_around()
4702 pfn = get_pte_pfn(ptent, pvmw->vma, addr); in lru_gen_look_around()
4703 if (pfn == -1) in lru_gen_look_around()
4713 if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i)) in lru_gen_look_around()
4743 update_bloom_filter(lruvec, max_seq, pvmw->pmd); in lru_gen_look_around()
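
The clamping at lines 4676-4685 centers a fixed-size probe window on the faulting address while staying inside both the PMD and the VMA. Assuming 4 KiB pages and MIN_LRU_BATCH == BITS_PER_LONG (64 on 64-bit), the window is 64 * 4 KiB = 256 KiB: when the eligible span exceeds that, the code takes [addr - 128 KiB, addr + 128 KiB), sliding the window up or down when addr sits within 128 KiB of either boundary. Line 4696 then rewinds the PTE pointer by (addr - start) / PAGE_SIZE entries so the loop starts at the window's first PTE.
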
4763 return READ_ONCE(lruvec->lrugen.seg); in lru_gen_memcg_seg()
4774 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4776 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_rotate_memcg()
4779 new = old = lruvec->lrugen.gen; in lru_gen_rotate_memcg()
4787 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4789 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4793 hlist_nulls_del_rcu(&lruvec->lrugen.list); in lru_gen_rotate_memcg()
4796 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4798 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4800 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4801 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4803 lruvec->lrugen.gen = new; in lru_gen_rotate_memcg()
4804 WRITE_ONCE(lruvec->lrugen.seg, seg); in lru_gen_rotate_memcg()
4806 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4807 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4809 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4822 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4824 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_online_memcg()
4826 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4828 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4829 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4831 lruvec->lrugen.gen = gen; in lru_gen_online_memcg()
4833 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4857 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4859 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) in lru_gen_release_memcg()
4862 gen = lruvec->lrugen.gen; in lru_gen_release_memcg()
4864 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); in lru_gen_release_memcg()
4865 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4867 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4868 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4870 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4906 struct lru_gen_folio *lrugen = &lruvec->lrugen; in sort_folio()
4930 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { in sort_folio()
4931 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4937 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in sort_folio()
4940 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4942 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], in sort_folio()
4943 lrugen->protected[hist][type][tier - 1] + delta); in sort_folio()
4948 if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { in sort_folio()
4950 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4958 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4970 if (!(sc->gfp_mask & __GFP_IO) && in isolate_folio()
4987 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); in isolate_folio()
5009 struct lru_gen_folio *lrugen = &lruvec->lrugen; in scan_folios()
5017 gen = lru_gen_from_seq(lrugen->min_seq[type]); in scan_folios()
5019 for (i = MAX_NR_ZONES; i > 0; i--) { in scan_folios()
5022 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; in scan_folios()
5023 struct list_head *head = &lrugen->folios[gen][type][zone]; in scan_folios()
5039 list_add(&folio->lru, list); in scan_folios()
5042 list_move(&folio->lru, &moved); in scan_folios()
5046 if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH) in scan_folios()
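
The loop head at lines 5019-5022 visits zones in a deliberate order: with i counting down from MAX_NR_ZONES, (sc->reclaim_idx + i) % MAX_NR_ZONES starts at the preferred zone and works downward before wrapping to the zones above it. As an illustration with a hypothetical MAX_NR_ZONES of 4 and reclaim_idx of 2, the sequence is zone 2, 1, 0, then 3; folios from zones above reclaim_idx are ineligible anyway and get deferred by sort_folio() (line 4948).
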
5092 return tier - 1; in get_tier_idx()
5099 int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; in get_type_to_scan()
5118 *tier_idx = tier - 1; in get_type_to_scan()
5129 int tier = -1; in isolate_folios()
5157 tier = -1; in isolate_folios()
5181 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
5190 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
5196 sc->nr_reclaimed += reclaimed; in evict_folios()
5200 list_del(&folio->lru); in evict_folios()
5217 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, in evict_folios()
5223 list_move(&folio->lru, &clean); in evict_folios()
5224 sc->nr_scanned -= folio_nr_pages(folio); in evict_folios()
5227 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
5231 walk = current->reclaim_state->mm_walk; in evict_folios()
5232 if (walk && walk->batched) in evict_folios()
5241 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
5264 struct lru_gen_folio *lrugen = &lruvec->lrugen; in should_run_aging()
5283 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in should_run_aging()
5294 *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total; in should_run_aging()
5330 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) in get_nr_to_scan()
5337 if (sc->priority == DEF_PRIORITY) in get_nr_to_scan()
5341 return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0; in get_nr_to_scan()
5348 return -1; in get_nr_to_reclaim()
5350 return max(sc->nr_to_reclaim, compact_gap(sc->order)); in get_nr_to_reclaim()
5361 if (swappiness && !(sc->gfp_mask & __GFP_IO)) in try_to_shrink_lruvec()
5379 if (sc->nr_reclaimed >= nr_to_reclaim) in try_to_shrink_lruvec()
5392 unsigned long scanned = sc->nr_scanned; in shrink_one()
5393 unsigned long reclaimed = sc->nr_reclaimed; in shrink_one()
5417 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
5419 if (!sc->proactive) in shrink_one()
5420 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, in shrink_one()
5421 sc->nr_reclaimed - reclaimed); in shrink_one()
5446 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
5450 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
5473 if (sc->nr_reclaimed >= nr_to_reclaim) in shrink_many()
5484 if (sc->nr_reclaimed >= nr_to_reclaim) in shrink_many()
5502 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); in lru_gen_shrink_lruvec()
5508 set_mm_walk(NULL, sc->proactive); in lru_gen_shrink_lruvec()
5538 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) in set_initial_priority()
5551 /* round down reclaimable and round up sc->nr_to_reclaim */ in set_initial_priority()
5552 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); in set_initial_priority()
5554 sc->priority = clamp(priority, 0, DEF_PRIORITY); in set_initial_priority()
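
A worked example of line 5552, which picks the largest priority whose scan window still covers the target (per the comment at line 5551: round reclaimable down and nr_to_reclaim up to powers of two): with reclaimable = 1,000,000 pages and sc->nr_to_reclaim = 32, fls_long(1000000) - 1 = 19 and fls_long(31) = 5, so priority = 14, which line 5554 clamps to DEF_PRIORITY (12). Sanity check: 1000000 >> 12 = 244 >= 32, so a single pass at that priority can plausibly meet the target.
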
5560 unsigned long reclaimed = sc->nr_reclaimed; in lru_gen_shrink_node()
5569 if (!sc->may_writepage || !sc->may_unmap) in lru_gen_shrink_node()
5576 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
5581 sc->nr_reclaimed = 0; in lru_gen_shrink_node()
5584 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
5589 sc->nr_reclaimed += reclaimed; in lru_gen_shrink_node()
5596 pgdat->kswapd_failures = 0; in lru_gen_shrink_node()
5605 struct lru_gen_folio *lrugen = &lruvec->lrugen; in state_is_valid()
5607 if (lrugen->enabled) { in state_is_valid()
5611 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
5618 if (!list_empty(&lrugen->folios[gen][type][zone])) in state_is_valid()
5634 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
5643 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
5649 if (!--remaining) in fill_evictable()
5663 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
5678 if (!--remaining) in drain_evictable()
5712 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5717 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5720 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5722 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5725 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5746 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5753 return -EINVAL; in min_ttl_ms_store()
5778 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5788 caps = -1; in enabled_store()
5790 return -EINVAL; in enabled_store()
5828 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); in lru_gen_seq_start()
5829 if (!m->private) in lru_gen_seq_start()
5830 return ERR_PTR(-ENOMEM); in lru_gen_seq_start()
5837 if (!nr_to_skip--) in lru_gen_seq_start()
5850 kvfree(m->private); in lru_gen_seq_stop()
5851 m->private = NULL; in lru_gen_seq_stop()
5856 int nid = lruvec_pgdat(v)->node_id; in lru_gen_seq_next()
5880 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5890 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); in lru_gen_seq_show_full()
5891 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); in lru_gen_seq_show_full()
5894 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); in lru_gen_seq_show_full()
5895 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); in lru_gen_seq_show_full()
5897 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); in lru_gen_seq_show_full()
5913 n = READ_ONCE(lruvec->mm_state.stats[hist][i]); in lru_gen_seq_show_full()
5916 n = READ_ONCE(lruvec->mm_state.stats[hist][i]); in lru_gen_seq_show_full()
5924 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5928 bool full = !debugfs_real_fops(m->file)->write; in lru_gen_seq_show()
5930 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5931 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5937 const char *path = memcg ? m->private : ""; in lru_gen_seq_show()
5941 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); in lru_gen_seq_show()
5951 seq = max_seq - MAX_NR_GENS + 1; in lru_gen_seq_show()
5958 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5960 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); in lru_gen_seq_show()
5967 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lru_gen_seq_show()
5998 return -EINVAL; in run_aging()
6000 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) in run_aging()
6001 return -ERANGE; in run_aging()
6014 return -EINVAL; in run_eviction()
6016 sc->nr_reclaimed = 0; in run_eviction()
6024 if (sc->nr_reclaimed >= nr_to_reclaim) in run_eviction()
6033 return -EINTR; in run_eviction()
6040 int err = -EINVAL; in run_cmd()
6044 return -EINVAL; in run_cmd()
6056 return -EINVAL; in run_cmd()
6073 case '-': in run_cmd()
6083 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
6091 int err = -EINVAL; in lru_gen_seq_write()
6096 .reclaim_idx = MAX_NR_ZONES - 1, in lru_gen_seq_write()
6102 return -ENOMEM; in lru_gen_seq_write()
6106 return -EFAULT; in lru_gen_seq_write()
6113 err = -ENOMEM; in lru_gen_seq_write()
6127 unsigned int swappiness = -1; in lru_gen_seq_write()
6128 unsigned long opt = -1; in lru_gen_seq_write()
6137 err = -EINVAL; in lru_gen_seq_write()
6184 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
6186 lrugen->max_seq = MIN_NR_GENS + 1; in lru_gen_init_lruvec()
6187 lrugen->enabled = lru_gen_enabled(); in lru_gen_init_lruvec()
6190 lrugen->timestamps[i] = jiffies; in lru_gen_init_lruvec()
6193 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); in lru_gen_init_lruvec()
6195 lruvec->mm_state.seq = MIN_NR_GENS; in lru_gen_init_lruvec()
6204 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
6208 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
6214 INIT_LIST_HEAD(&memcg->mm_list.fifo); in lru_gen_init_memcg()
6215 spin_lock_init(&memcg->mm_list.lock); in lru_gen_init_memcg()
6223 VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo)); in lru_gen_exit_memcg()
6228 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
6229 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
6231 lruvec->lrugen.list.next = LIST_POISON1; in lru_gen_exit_memcg()
6234 bitmap_free(lruvec->mm_state.filters[i]); in lru_gen_exit_memcg()
6235 lruvec->mm_state.filters[i] = NULL; in lru_gen_exit_memcg()
6280 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
6306 sc->priority == DEF_PRIORITY); in shrink_lruvec()
6317 nr[lru] -= nr_to_scan; in shrink_lruvec()
6369 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
6370 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
6371 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
6374 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
6375 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
6376 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
6379 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
6394 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction()
6395 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
6396 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
6403 * Reclaim/compaction is used for high-order allocation requests. It reclaims
6404 * order-0 pages before compacting the zone. should_continue_reclaim() returns
6427 * first, by assuming that zero delta of sc->nr_scanned means full LRU in should_continue_reclaim()
6429 * where always a non-zero amount of pages were scanned. in should_continue_reclaim()
6435 for (z = 0; z <= sc->reclaim_idx; z++) { in should_continue_reclaim()
6436 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
6441 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in should_continue_reclaim()
6442 sc->reclaim_idx, 0)) in should_continue_reclaim()
6445 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) in should_continue_reclaim()
6453 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
6455 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
6463 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; in shrink_node_memcgs()
6473 * This loop can become CPU-bound when target memcgs in shrink_node_memcgs()
6474 * aren't eligible for reclaim - either because they in shrink_node_memcgs()
6495 if (!sc->memcg_low_reclaim) { in shrink_node_memcgs()
6496 sc->memcg_low_skipped = 1; in shrink_node_memcgs()
6502 reclaimed = sc->nr_reclaimed; in shrink_node_memcgs()
6503 scanned = sc->nr_scanned; in shrink_node_memcgs()
6507 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
6508 sc->priority); in shrink_node_memcgs()
6511 if (!sc->proactive) in shrink_node_memcgs()
6512 vmpressure(sc->gfp_mask, memcg, false, in shrink_node_memcgs()
6513 sc->nr_scanned - scanned, in shrink_node_memcgs()
6514 sc->nr_reclaimed - reclaimed); in shrink_node_memcgs()
6530 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
6533 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
6535 nr_reclaimed = sc->nr_reclaimed; in shrink_node()
6536 nr_scanned = sc->nr_scanned; in shrink_node()
6544 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; in shrink_node()
6547 if (!sc->proactive) in shrink_node()
6548 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
6549 sc->nr_scanned - nr_scanned, nr_node_reclaimed); in shrink_node()
6557 * it implies that the long-lived page allocation rate in shrink_node()
6572 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) in shrink_node()
6573 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
6576 if (sc->nr.unqueued_dirty == sc->nr.file_taken) in shrink_node()
6577 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
6586 if (sc->nr.immediate) in shrink_node()
6597 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { in shrink_node()
6599 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); in shrink_node()
6602 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); in shrink_node()
6612 !sc->hibernation_mode && in shrink_node()
6613 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || in shrink_node()
6614 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) in shrink_node()
6627 pgdat->kswapd_failures = 0; in shrink_node()
6631 * Returns true if compaction should go ahead for a costly-order request, or
6640 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in compaction_ready()
6641 sc->reclaim_idx, 0)) in compaction_ready()
6645 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) in compaction_ready()
6657 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
6659 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
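compaction_ready() applies the same gap on the bail-out side: direct reclaim for a costly order backs off once free pages clear the high watermark plus compact_gap(order). A toy model, with made-up numbers standing in for zone state:

/* Model of the compaction_ready() cutoff; names are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

static bool compaction_ready_model(unsigned long free_pages,
                                   unsigned long high_wmark, unsigned int order)
{
    unsigned long watermark = high_wmark + (2UL << order); /* compact_gap() */
    return free_pages >= watermark;
}

int main(void)
{
    /* order-9 (2MB on 4K pages): need high watermark + 1024 free pages */
    printf("%d\n", compaction_ready_model(5100, 4000, 9)); /* 5100>=5024: 1 */
    return 0;
}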
6668 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { in consider_reclaim_throttle()
6671 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6688 if (sc->priority == 1 && !sc->nr_reclaimed) in consider_reclaim_throttle()
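consider_reclaim_throttle() has two branches worth spelling out: reclaiming more than one eighth of what was scanned counts as efficient and wakes the VMSCAN_THROTTLE_NOPROGRESS waiters, while reaching priority 1 with nothing reclaimed throttles the caller instead. A print-based model:

/* Model of consider_reclaim_throttle(); printfs replace waitqueue ops. */
#include <stdio.h>

static void consider_throttle_model(unsigned long reclaimed,
                                    unsigned long scanned, int priority)
{
    if (reclaimed > (scanned >> 3)) {
        printf("efficient: wake VMSCAN_THROTTLE_NOPROGRESS waiters\n");
        return;
    }
    if (priority == 1 && !reclaimed)
        printf("no progress at priority 1: throttle this task\n");
}

int main(void)
{
    consider_throttle_model(100, 400, 5); /* 100 > 50: wake */
    consider_throttle_model(0, 400, 1);   /* throttle */
    return 0;
}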
6693 * This is the direct reclaim path, for page-allocating processes. We only
6715 orig_mask = sc->gfp_mask; in shrink_zones()
6717 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
6718 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
6722 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6736 * non-zero order, only frequent costly order in shrink_zones()
6742 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
6744 sc->compaction_ready = true; in shrink_zones()
6754 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6764 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
6765 sc->order, sc->gfp_mask, in shrink_zones()
6767 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
6768 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
6773 first_pgdat = zone->zone_pgdat; in shrink_zones()
6776 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6778 last_pgdat = zone->zone_pgdat; in shrink_zones()
6779 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
6789 sc->gfp_mask = orig_mask; in shrink_zones()
6802 target_lruvec->refaults[WORKINGSET_ANON] = refaults; in snapshot_refaults()
6804 target_lruvec->refaults[WORKINGSET_FILE] = refaults; in snapshot_refaults()
6814 * high - the zone may be full of dirty or under-writeback pages, which this
6826 int initial_priority = sc->priority; in do_try_to_free_pages()
6834 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); in do_try_to_free_pages()
6837 if (!sc->proactive) in do_try_to_free_pages()
6838 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
6839 sc->priority); in do_try_to_free_pages()
6840 sc->nr_scanned = 0; in do_try_to_free_pages()
6843 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
6846 if (sc->compaction_ready) in do_try_to_free_pages()
6853 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
6854 sc->may_writepage = 1; in do_try_to_free_pages()
6855 } while (--sc->priority >= 0); in do_try_to_free_pages()
6858 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
6859 sc->nodemask) { in do_try_to_free_pages()
6860 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
6862 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
6864 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
6869 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6870 zone->zone_pgdat); in do_try_to_free_pages()
6871 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6877 if (sc->nr_reclaimed) in do_try_to_free_pages()
6878 return sc->nr_reclaimed; in do_try_to_free_pages()
6881 if (sc->compaction_ready) in do_try_to_free_pages()
6893 if (sc->skipped_deactivate) { in do_try_to_free_pages()
6894 sc->priority = initial_priority; in do_try_to_free_pages()
6895 sc->force_deactivate = 1; in do_try_to_free_pages()
6896 sc->skipped_deactivate = 0; in do_try_to_free_pages()
6901 if (sc->memcg_low_skipped) { in do_try_to_free_pages()
6902 sc->priority = initial_priority; in do_try_to_free_pages()
6903 sc->force_deactivate = 0; in do_try_to_free_pages()
6904 sc->memcg_low_reclaim = 1; in do_try_to_free_pages()
6905 sc->memcg_low_skipped = 0; in do_try_to_free_pages()
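The do_try_to_free_pages() fragments above are its priority ladder plus two full restarts: if nothing was reclaimed, retry once forcing deactivation, and as a last resort retry permitting reclaim below memory.low. The skeleton below reproduces that control flow in user space; struct sc_model mirrors the relevant scan_control fields and shrink() is a stub for shrink_zones().

/* Skeleton of do_try_to_free_pages()' retry logic; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12

struct sc_model {
    unsigned long nr_reclaimed, nr_to_reclaim;
    int priority;
    bool force_deactivate, skipped_deactivate;
    bool memcg_low_reclaim, memcg_low_skipped;
};

static void shrink(struct sc_model *sc) { (void)sc; } /* shrink_zones() stub */

static unsigned long try_to_free(struct sc_model *sc)
{
    int initial_priority = sc->priority;
retry:
    do {
        shrink(sc);
        if (sc->nr_reclaimed >= sc->nr_to_reclaim)
            break;
    } while (--sc->priority >= 0);

    if (sc->nr_reclaimed)
        return sc->nr_reclaimed;

    if (sc->skipped_deactivate) {          /* retry, deactivating this time */
        sc->priority = initial_priority;
        sc->force_deactivate = true;
        sc->skipped_deactivate = false;
        goto retry;
    }
    if (sc->memcg_low_skipped) {           /* last resort: dip below memory.low */
        sc->priority = initial_priority;
        sc->force_deactivate = false;
        sc->memcg_low_reclaim = true;
        sc->memcg_low_skipped = false;
        goto retry;
    }
    return 0;
}

int main(void)
{
    struct sc_model sc = { .nr_to_reclaim = 32, .priority = DEF_PRIORITY };
    printf("reclaimed %lu\n", try_to_free(&sc));
    return 0;
}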
6920 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6924 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
6942 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6943 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6944 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6946 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
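allow_direct_reclaim() implements a 50% rule: direct reclaimers are throttled once the node's free pages fall below half the summed min watermarks of the lower zones, and kswapd is kicked (clamped to ZONE_NORMAL, as above). A model with plain arrays standing in for pgdat->node_zones:

/* Model of allow_direct_reclaim()'s 50% rule; not kernel API. */
#include <stdbool.h>
#include <stdio.h>

static bool allow_direct_reclaim_model(const unsigned long *min_wmark,
                                       const unsigned long *free, int nr_zones)
{
    unsigned long reserve = 0, free_pages = 0;

    for (int i = 0; i < nr_zones; i++) {
        reserve += min_wmark[i];
        free_pages += free[i];
    }
    if (!reserve)
        return true;
    return free_pages > reserve / 2; /* else: throttle and wake kswapd */
}

int main(void)
{
    unsigned long wmark[] = { 1000, 4000 }, free[] = { 900, 1400 };
    printf("%s\n", allow_direct_reclaim_model(wmark, free, 2) ?
           "proceed" : "throttle"); /* 2300 <= 2500 -> throttle */
    return 0;
}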
6975 if (current->flags & PF_KTHREAD) in throttle_direct_reclaim()
7005 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
7027 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
7031 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
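The two wait calls above are the two throttling flavours in throttle_direct_reclaim(): allocations that cannot enter filesystem reclaim nap interruptibly with a bounded timeout, everyone else sleeps killably until allow_direct_reclaim() passes, and kernel threads skip throttling entirely. A rough model; the one-second cap is an assumption matching the HZ timeout in the source, the rest is stand-in code:

/* Rough model of throttle_direct_reclaim()'s policy choice. */
#include <stdbool.h>
#include <stdio.h>

enum throttle { THROTTLE_NONE, THROTTLE_TIMEOUT_1S, THROTTLE_KILLABLE };

static enum throttle pick_throttle(bool is_kthread, bool gfp_fs, bool allow_ok)
{
    if (is_kthread || allow_ok)
        return THROTTLE_NONE;   /* kthreads must never be throttled */
    return gfp_fs ? THROTTLE_KILLABLE : THROTTLE_TIMEOUT_1S;
}

int main(void)
{
    printf("%d %d %d\n",
           pick_throttle(true,  true,  false),  /* 0: kthread        */
           pick_throttle(false, false, false),  /* 1: !__GFP_FS, cap */
           pick_throttle(false, true,  false)); /* 2: killable wait  */
    return 0;
}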
7098 .reclaim_idx = MAX_NR_ZONES - 1, in mem_cgroup_shrink_node()
7102 WARN_ON_ONCE(!current->reclaim_state); in mem_cgroup_shrink_node()
7137 .reclaim_idx = MAX_NR_ZONES - 1, in try_to_free_mem_cgroup_pages()
7198 * Check for watermark boosts top-down as the higher zones in pgdat_watermark_boosted()
7204 for (i = highest_zoneidx; i >= 0; i--) { in pgdat_watermark_boosted()
7205 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
7209 if (zone->watermark_boost) in pgdat_watermark_boosted()
7223 unsigned long mark = -1; in pgdat_balanced()
7227 * Check watermarks bottom-up as lower zones are more likely to in pgdat_balanced()
7231 zone = pgdat->node_zones + i; in pgdat_balanced()
7246 * need balancing by definition. This can happen if a zone-restricted in pgdat_balanced()
7249 if (mark == -1) in pgdat_balanced()
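pgdat_balanced() walks zones bottom-up and declares the node balanced as soon as any eligible zone clears its high watermark at the request order; if mark is still -1 afterwards, no zone was eligible and the node is balanced by definition, e.g. when all its memory is pinned by a zone-restricted allocation. Modeled below with plain arrays (a zero high watermark marks an unmanaged zone):

/* Model of pgdat_balanced(); arrays stand in for pgdat->node_zones. */
#include <stdbool.h>
#include <stdio.h>

static bool pgdat_balanced_model(const unsigned long *high_wmark,
                                 const unsigned long *free,
                                 int highest_zoneidx)
{
    unsigned long mark = -1UL;

    for (int i = 0; i <= highest_zoneidx; i++) {
        if (!high_wmark[i])      /* unmanaged zone: skip */
            continue;
        mark = high_wmark[i];
        if (free[i] >= mark)
            return true;
    }
    return mark == -1UL;         /* nothing eligible to balance */
}

int main(void)
{
    unsigned long wmark[] = { 0, 2000, 3000 }, free[] = { 0, 2500, 1000 };
    printf("%d\n", pgdat_balanced_model(wmark, free, 2)); /* zone 1 ok: 1 */
    return 0;
}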
7260 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
7261 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
7262 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
7263 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
7288 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
7289 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
7292 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
7318 sc->nr_to_reclaim = 0; in kswapd_shrink_node()
7319 for (z = 0; z <= sc->reclaim_idx; z++) { in kswapd_shrink_node()
7320 zone = pgdat->node_zones + z; in kswapd_shrink_node()
7324 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
7335 * high-order allocations. If twice the allocation size has been in kswapd_shrink_node()
7336 * reclaimed then recheck watermarks only at order-0 to prevent in kswapd_shrink_node()
7337 * excessive reclaim. Assume that a process requested a high-order in kswapd_shrink_node()
7340 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) in kswapd_shrink_node()
7341 sc->order = 0; in kswapd_shrink_node()
7343 return sc->nr_scanned >= sc->nr_to_reclaim; in kswapd_shrink_node()
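kswapd_shrink_node() sizes each balancing pass as the sum of the eligible zones' high watermarks (at least SWAP_CLUSTER_MAX each), and once a high-order wakeup has reclaimed compact_gap(order) pages it downgrades itself to order-0 to prevent excessive reclaim. In numbers:

/* Arithmetic of kswapd_shrink_node()'s per-pass target; stand-in values. */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long max_ul(unsigned long a, unsigned long b)
{
    return a > b ? a : b;
}

int main(void)
{
    unsigned long high_wmark[] = { 12, 4096 }; /* tiny zone rounds up to 32 */
    unsigned long nr_to_reclaim = 0;
    unsigned long nr_reclaimed = 40;
    unsigned int order = 4;

    for (int z = 0; z < 2; z++)
        nr_to_reclaim += max_ul(high_wmark[z], SWAP_CLUSTER_MAX);

    if (order && nr_reclaimed >= (2UL << order)) /* compact_gap(4) == 32 */
        order = 0;                               /* recheck at order-0    */

    printf("target=%lu, order now %u\n", nr_to_reclaim, order);
    return 0;
}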
7354 zone = pgdat->node_zones + i; in update_reclaim_active()
7360 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
7362 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
7385 * kswapd scans the zones in the highmem->normal->dma direction. It skips
7420 zone = pgdat->node_zones + i; in balance_pgdat()
7424 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
7425 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
7443 * purpose -- on 64-bit systems it is expected that in balance_pgdat()
7444 * buffer_heads are stripped during active rotation. On 32-bit in balance_pgdat()
7451 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { in balance_pgdat()
7452 zone = pgdat->node_zones + i; in balance_pgdat()
7466 * re-evaluate if boosting is required when kswapd next wakes. in balance_pgdat()
7483 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) in balance_pgdat()
7488 * intent is to relieve pressure not issue sub-optimal IO in balance_pgdat()
7506 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
7529 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
7531 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
7544 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; in balance_pgdat()
7545 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); in balance_pgdat()
7556 sc.priority--; in balance_pgdat()
7560 pgdat->kswapd_failures++; in balance_pgdat()
7574 zone = pgdat->node_zones + i; in balance_pgdat()
7575 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
7576 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
7577 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
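The boost bookkeeping in balance_pgdat() snapshots each zone's watermark_boost at entry, counts reclaim progress against that budget, and strips the accounted boost from the zone (under zone->lock) on the way out, so kswapd re-evaluates whether boosting is still needed on its next wake. A toy walk-through of that arithmetic:

/* Walk-through of balance_pgdat()'s boost accounting; not kernel code. */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned long watermark_boost = 256;  /* zone's boost at kswapd entry */
    unsigned long zone_boost = watermark_boost; /* snapshot               */
    unsigned long nr_boost_reclaim = zone_boost;

    /* one reclaim pass made some progress */
    unsigned long nr_reclaimed = 100;
    nr_boost_reclaim -= min_ul(nr_boost_reclaim, nr_reclaimed);

    /* on exit, deflate the boost by the snapshot (under zone->lock) */
    watermark_boost -= min_ul(watermark_boost, zone_boost);
    printf("boost budget left %lu, zone boost now %lu\n",
           nr_boost_reclaim, watermark_boost);
    return 0;
}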
7602 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7611 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
7625 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7657 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7661 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7662 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7665 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7666 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7675 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7682 * per-cpu vmstat threshold while kswapd is awake and restore in kswapd_try_to_sleep()
7697 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
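kswapd_try_to_sleep() sleeps in two phases: a short trial nap (HZ/10 in the source, an assumption here) followed by a full sleep only if the node still looks balanced afterwards, which catches a wakeup racing with the decision to sleep. A crude POSIX analogue with prepare_kswapd_sleep stubbed to always succeed:

/* Two-phase sleep analogue; sleep() stands in for the kernel waitqueue. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool prepare_kswapd_sleep_model(void) { return true; } /* stub */

int main(void)
{
    if (prepare_kswapd_sleep_model()) {
        usleep(100 * 1000);                /* trial nap (~HZ/10)           */
        if (prepare_kswapd_sleep_model())  /* still balanced? sleep for real */
            sleep(1);                      /* stands in for the full wait  */
        else
            puts("woken during trial nap: back to balancing");
    }
    return 0;
}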
7710 * If there are applications that are active memory-allocators
7716 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; in kswapd()
7719 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
7733 * us from recursively trying to free more memory as we're in kswapd()
7736 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; in kswapd()
7739 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7740 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7741 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7745 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7754 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7757 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7758 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7772 * Reclaim begins at the requested order but if a high-order in kswapd()
7774 * order-0. If that happens, kswapd will consider sleeping in kswapd()
7779 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7787 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); in kswapd()
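The kswapd() fragments show the loop's shape: the task marks itself PF_MEMALLOC | PF_KSWAPD, then alternates between sleeping and balancing, keeping the requested alloc_order distinct from the reclaim_order that balance_pgdat() actually achieved (which may have fallen back to 0, per kswapd_shrink_node() above). A skeleton where every name is a stand-in:

/* Skeleton of the kswapd() main loop; all names are stand-ins. */
#include <stdio.h>

static unsigned int posted_order = 4;   /* models pgdat->kswapd_order */

static void try_to_sleep_model(void) { } /* kswapd_try_to_sleep() stub */
static unsigned int balance_model(unsigned int order)
{
    return order > 3 ? 0 : order;       /* may fall back to order-0 */
}

int main(void)
{
    for (int pass = 0; pass < 2; pass++) { /* while (!kthread_should_stop()) */
        unsigned int alloc_order = posted_order;
        unsigned int reclaim_order;

        posted_order = 0;               /* consume the posted request */
        try_to_sleep_model();
        reclaim_order = balance_model(alloc_order);
        printf("requested order %u, balanced at order %u\n",
               alloc_order, reclaim_order);
    }
    return 0;
}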
7793 * A zone is low on free memory or too fragmented for high-order memory. If
7811 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7812 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7815 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7817 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7818 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7820 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7824 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7829 * fragmented for high-order allocations. Wake up kcompactd in wakeup_kswapd()
7839 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7841 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
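wakeup_kswapd() only ratchets kswapd_order and kswapd_highest_zoneidx upward (modeled as a plain ratchet below, a simplification of the MAX_NR_ZONES sentinel handling), skips the wakeup when kswapd is not waiting, and hands high-order work to kcompactd when kswapd keeps failing or the node is already balanced. Stubbed decision model:

/* Decision model for wakeup_kswapd(); predicates and types are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RECLAIM_RETRIES 16

struct node_model {
    unsigned int kswapd_order;
    int kswapd_highest_zoneidx;
    int failures;
    bool kswapd_waiting;
};

static bool balanced(const struct node_model *n) { (void)n; return false; }

static const char *wakeup_kswapd_model(struct node_model *n,
                                       unsigned int order, int zoneidx)
{
    if (zoneidx > n->kswapd_highest_zoneidx)   /* ratchet, never lower */
        n->kswapd_highest_zoneidx = zoneidx;
    if (order > n->kswapd_order)
        n->kswapd_order = order;
    if (!n->kswapd_waiting)
        return "already awake";
    if (n->failures >= MAX_RECLAIM_RETRIES || balanced(n))
        return order ? "wake kcompactd" : "nothing to do";
    return "wake kswapd";
}

int main(void)
{
    struct node_model n = { .kswapd_waiting = true, .failures = 16 };
    printf("%s\n", wakeup_kswapd_model(&n, 2, 1)); /* wake kcompactd */
    return 0;
}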
7846 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7858 .reclaim_idx = MAX_NR_ZONES - 1, in shrink_all_memory()
7884 * This kswapd start function will be called by init and node-hot-add.
7891 if (!pgdat->kswapd) { in kswapd_run()
7892 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
7893 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7897 pgdat->kswapd = NULL; in kswapd_run()
7913 kswapd = pgdat->kswapd; in kswapd_stop()
7916 pgdat->kswapd = NULL; in kswapd_stop()
7937 * If non-zero call node_reclaim when the number of free pages falls below
7972 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; in node_unmapped_file_pages()
8000 return nr_pagecache_reclaimable - delta; in node_pagecache_reclaimable()
8024 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
8036 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
8037 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
8044 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __node_reclaim()
8071 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
8073 pgdat->min_slab_pages) in node_reclaim()
8079 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) in node_reclaim()
8088 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
8091 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
8095 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
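node_reclaim() gates __node_reclaim() three ways, all visible above: there must be enough reclaimable pagecache or slab (min_unmapped_pages / min_slab_pages), the context must be allowed to block and not already be PF_MEMALLOC, and reclaimers are serialized per node via the PGDAT_RECLAIM_LOCKED bit. A model using a C11 atomic flag for the bitlock; every name is a stand-in:

/* Gate model for node_reclaim(); the flag models PGDAT_RECLAIM_LOCKED. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag node_reclaim_locked = ATOMIC_FLAG_INIT;

static bool node_reclaim_model(unsigned long pagecache, unsigned long min_unmapped,
                               unsigned long slab, unsigned long min_slab,
                               bool may_block)
{
    if (pagecache <= min_unmapped && slab <= min_slab)
        return false;                        /* nothing worth reclaiming   */
    if (!may_block)
        return false;                        /* atomic context: bail       */
    if (atomic_flag_test_and_set(&node_reclaim_locked))
        return false;                        /* another reclaimer is active */
    /* ... __node_reclaim() would run here ... */
    atomic_flag_clear(&node_reclaim_locked);
    return true;
}

int main(void)
{
    printf("%d\n", node_reclaim_model(5000, 1000, 0, 0, true)); /* 1 */
    return 0;
}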
8105 * check_move_unevictable_folios - Move evictable folios to appropriate zone
8120 for (i = 0; i < fbatch->nr; i++) { in check_move_unevictable_folios()
8121 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios()