Lines Matching refs:lruvec in mm/vmscan.c

601 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, in lruvec_lru_size() argument
608 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
614 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); in lruvec_lru_size()
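
Matches 601-614 fall in lruvec_lru_size(), which sums the per-zone LRU counters up to a given zone index. The line numbers and identifiers in this listing are consistent with Linux v6.1's mm/vmscan.c; a sketch of the whole function, with the non-matching lines filled in from that source as an assumption:

    static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                         int zone_idx)
    {
            unsigned long size = 0;
            int zid;

            /* walk node_zones[0..zone_idx], skipping zones with no managed pages */
            for (zid = 0; zid <= zone_idx; zid++) {
                    struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

                    if (!managed_zone(zone))
                            continue;

                    if (!mem_cgroup_disabled())
                            size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
                    else
                            size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
            }

            return size;
    }
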
2145 static __always_inline void update_lru_sizes(struct lruvec *lruvec, in update_lru_sizes() argument
2154 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
2181 struct lruvec *lruvec, struct list_head *dst, in isolate_lru_folios() argument
2185 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
2269 update_lru_sizes(lruvec, lru, nr_zone_taken); in isolate_lru_folios()
2303 struct lruvec *lruvec; in folio_isolate_lru() local
2306 lruvec = folio_lruvec_lock_irq(folio); in folio_isolate_lru()
2307 lruvec_del_folio(lruvec, folio); in folio_isolate_lru()
2308 unlock_page_lruvec_irq(lruvec); in folio_isolate_lru()
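
Matches 2303-2308 are the core of folio_isolate_lru(): lock the folio's lruvec, unlink the folio, unlock. A hedged reconstruction of the full function (the folio_test_clear_lru() and folio_get() steps do not reference lruvec and are filled in from the v6.1 source):

    int folio_isolate_lru(struct folio *folio)
    {
            int ret = -EBUSY;

            VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);

            /* clearing the LRU flag first keeps others from touching the folio */
            if (folio_test_clear_lru(folio)) {
                    struct lruvec *lruvec;

                    folio_get(folio);
                    lruvec = folio_lruvec_lock_irq(folio);
                    lruvec_del_folio(lruvec, folio);
                    unlock_page_lruvec_irq(lruvec);
                    ret = 0;
            }

            return ret;
    }
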
2365 static unsigned int move_folios_to_lru(struct lruvec *lruvec, in move_folios_to_lru() argument
2377 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2379 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2400 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2402 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
2413 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); in move_folios_to_lru()
2414 lruvec_add_folio(lruvec, folio); in move_folios_to_lru()
2418 workingset_age_nonresident(lruvec, nr_pages); in move_folios_to_lru()
2444 struct lruvec *lruvec, struct scan_control *sc, in shrink_inactive_list() argument
2454 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_inactive_list()
2472 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2474 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list, in shrink_inactive_list()
2481 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); in shrink_inactive_list()
2484 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2491 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2492 move_folios_to_lru(lruvec, &folio_list); in shrink_inactive_list()
2498 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); in shrink_inactive_list()
2500 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2502 lru_note_cost(lruvec, file, stat.nr_pageout); in shrink_inactive_list()
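
Read in order, matches 2444-2502 give the locking shape of shrink_inactive_list(): lru_lock is held around isolation and around putback, but dropped for the reclaim itself. A condensed outline (the shrink_folio_list() step is filled in from the v6.1 source as an assumption; several counters are elided):

    spin_lock_irq(&lruvec->lru_lock);
    nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
                                  &nr_scanned, sc, lru);
    __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); /* PGSCAN_* */
    spin_unlock_irq(&lruvec->lru_lock);

    /* reclaim runs without the lock held */
    nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);

    spin_lock_irq(&lruvec->lru_lock);
    move_folios_to_lru(lruvec, &folio_list);        /* put back survivors */
    __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); /* PGSTEAL_* */
    spin_unlock_irq(&lruvec->lru_lock);

    lru_note_cost(lruvec, file, stat.nr_pageout);
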
2564 struct lruvec *lruvec, in shrink_active_list() argument
2577 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_active_list()
2581 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2583 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold, in shrink_active_list()
2590 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); in shrink_active_list()
2592 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2641 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2643 nr_activate = move_folios_to_lru(lruvec, &l_active); in shrink_active_list()
2644 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); in shrink_active_list()
2649 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); in shrink_active_list()
2652 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2718 struct lruvec *lruvec, struct scan_control *sc) in shrink_list() argument
2722 shrink_active_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
2728 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
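
shrink_list() (matches 2718-2728) is the small dispatcher above these two routines: active lists are only aged, inactive lists are actually reclaimed. A sketch of the whole function, with the sc->may_deactivate test filled in from the v6.1 source as an assumption:

    static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                                     struct lruvec *lruvec, struct scan_control *sc)
    {
            if (is_active_lru(lru)) {
                    if (sc->may_deactivate & (1 << is_file_lru(lru)))
                            shrink_active_list(nr_to_scan, lruvec, sc, lru);
                    else
                            sc->skipped_deactivate = 1;
                    return 0;
            }

            return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
    }
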
2759 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) in inactive_is_low() argument
2766 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); in inactive_is_low()
2767 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); in inactive_is_low()
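
Matches 2766-2767 read the two counters that inactive_is_low() compares. The surrounding heuristic (reconstructed from the v6.1 source, as an assumption) scales the target inactive:active ratio with the square root of the list size: roughly ratio 1 below 1GB, 3 at 1GB, 10 at 10GB, 101 at 1TB:

    static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
    {
            enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
            unsigned long inactive, active;
            unsigned long inactive_ratio;
            unsigned long gb;

            inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
            active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);

            /* e.g. 1GB of pages: int_sqrt(10) == 3, so 3:1 active:inactive */
            gb = (inactive + active) >> (30 - PAGE_SHIFT);
            if (gb)
                    inactive_ratio = int_sqrt(10 * gb);
            else
                    inactive_ratio = 1;

            return inactive * inactive_ratio < active;
    }
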
2788 struct lruvec *target_lruvec; in prepare_scan_count()
2898 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, in get_scan_count() argument
2901 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_scan_count()
2902 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in get_scan_count()
2993 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
3126 #define DEFINE_MAX_SEQ(lruvec) \ argument
3127 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
3129 #define DEFINE_MIN_SEQ(lruvec) \ argument
3131 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
3132 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
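
The two macro bodies are split across matches 3126-3132; source line 3130, which opens the min_seq array initializer, contains no lruvec reference and so does not appear in this listing. Reconstructed, with that missing line filled in from the v6.1 source as an assumption:

    #define DEFINE_MAX_SEQ(lruvec)                                          \
            unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)

    #define DEFINE_MIN_SEQ(lruvec)                                          \
            unsigned long min_seq[ANON_AND_FILE] = {                        \
                    READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]),      \
                    READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]),      \
            }
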
3140 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) in get_lruvec()
3146 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec() local
3149 if (!lruvec->pgdat) in get_lruvec()
3150 lruvec->pgdat = pgdat; in get_lruvec()
3152 return lruvec; in get_lruvec()
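
get_lruvec() (matches 3140-3152) maps a memcg and a node id to the per-node lruvec; the pgdat backfill covers lruvecs created before their node was hot-added. A sketch with the non-matching CONFIG_MEMCG plumbing filled in from the v6.1 source:

    static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
    {
            struct pglist_data *pgdat = NODE_DATA(nid);

    #ifdef CONFIG_MEMCG
            if (memcg) {
                    struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;

                    /* set on the first lookup after the node comes online */
                    if (!lruvec->pgdat)
                            lruvec->pgdat = pgdat;

                    return lruvec;
            }
    #endif
            VM_WARN_ON_ONCE(!mem_cgroup_disabled());

            return &pgdat->__lruvec;
    }
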
3160 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) in get_swappiness() argument
3162 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in get_swappiness()
3163 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_swappiness()
3172 static int get_nr_gens(struct lruvec *lruvec, int type) in get_nr_gens() argument
3174 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
3177 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) in seq_is_valid() argument
3180 return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && in seq_is_valid()
3181 get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && in seq_is_valid()
3182 get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; in seq_is_valid()
3219 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_add_mm() local
3221 if (!lruvec) in lru_gen_add_mm()
3225 if (lruvec->mm_state.tail == &mm_list->fifo) in lru_gen_add_mm()
3226 lruvec->mm_state.tail = &mm->lru_gen.list; in lru_gen_add_mm()
3251 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_del_mm() local
3253 if (!lruvec) in lru_gen_del_mm()
3257 if (lruvec->mm_state.tail == &mm->lru_gen.list) in lru_gen_del_mm()
3258 lruvec->mm_state.tail = lruvec->mm_state.tail->next; in lru_gen_del_mm()
3261 if (lruvec->mm_state.head != &mm->lru_gen.list) in lru_gen_del_mm()
3264 lruvec->mm_state.head = lruvec->mm_state.head->next; in lru_gen_del_mm()
3266 if (lruvec->mm_state.head == &mm_list->fifo) in lru_gen_del_mm()
3267 WRITE_ONCE(lruvec->mm_state.seq, lruvec->mm_state.seq + 1); in lru_gen_del_mm()
3346 static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq) in reset_bloom_filter() argument
3351 filter = lruvec->mm_state.filters[gen]; in reset_bloom_filter()
3359 WRITE_ONCE(lruvec->mm_state.filters[gen], filter); in reset_bloom_filter()
3362 static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) in update_bloom_filter() argument
3368 filter = READ_ONCE(lruvec->mm_state.filters[gen]); in update_bloom_filter()
3380 static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) in test_bloom_filter() argument
3386 filter = READ_ONCE(lruvec->mm_state.filters[gen]); in test_bloom_filter()
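
Matches 3346-3386 cover the per-generation Bloom filters that record which PMD entries a walk found worth revisiting; walk_pmd_range() (match 4096) skips any PMD the previous walk did not record. A sketch of the test path (get_item_key() and filter_gen_from_seq() are helpers in the v6.1 source; a filter that has not been allocated yet reports a hit, so nothing gets skipped):

    static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
    {
            int key[2];
            unsigned long *filter;
            int gen = filter_gen_from_seq(seq);

            filter = READ_ONCE(lruvec->mm_state.filters[gen]);
            if (!filter)
                    return true;

            /* two bits per item, both derived from one hash of its address */
            get_item_key(item, key);

            return test_bit(key[0], filter) && test_bit(key[1], filter);
    }
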
3395 static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last) in reset_mm_stats() argument
3400 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
3406 WRITE_ONCE(lruvec->mm_state.stats[hist][i], in reset_mm_stats()
3407 lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]); in reset_mm_stats()
3413 hist = lru_hist_from_seq(lruvec->mm_state.seq + 1); in reset_mm_stats()
3416 WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0); in reset_mm_stats()
3424 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in should_skip_mm()
3444 static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, in iterate_mm_list() argument
3450 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in iterate_mm_list()
3452 struct lru_gen_mm_state *mm_state = &lruvec->mm_state; in iterate_mm_list()
3511 reset_mm_stats(lruvec, walk, last); in iterate_mm_list()
3516 reset_bloom_filter(lruvec, walk->max_seq + 1); in iterate_mm_list()
3526 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) in iterate_mm_list_nowalk() argument
3529 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in iterate_mm_list_nowalk()
3531 struct lru_gen_mm_state *mm_state = &lruvec->mm_state; in iterate_mm_list_nowalk()
3541 reset_mm_stats(lruvec, NULL, true); in iterate_mm_list_nowalk()
3579 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, in read_ctrl_pos() argument
3582 struct lru_gen_struct *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3594 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) in reset_ctrl_pos() argument
3597 struct lru_gen_struct *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3601 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3671 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) in folio_inc_gen() argument
3674 struct lru_gen_struct *lrugen = &lruvec->lrugen; in folio_inc_gen()
3695 lru_gen_update_size(lruvec, folio, old_gen, new_gen); in folio_inc_gen()
3716 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk) in reset_batch_size() argument
3719 struct lru_gen_struct *lrugen = &lruvec->lrugen; in reset_batch_size()
3734 if (lru_gen_is_active(lruvec, gen)) in reset_batch_size()
3736 __update_lru_size(lruvec, lru, zone, delta); in reset_batch_size()
3885 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
3886 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
3954 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
3955 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
4069 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
4096 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) in walk_pmd_range()
4107 update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i); in walk_pmd_range()
4140 if (wq_has_sleeper(&walk->lruvec->mm_state.wait)) in walk_pud_range()
4162 static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk) in walk_mm() argument
4170 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in walk_mm()
4191 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
4192 reset_batch_size(lruvec, walk); in walk_mm()
4193 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
4232 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) in inc_min_seq() argument
4236 struct lru_gen_struct *lrugen = &lruvec->lrugen; in inc_min_seq()
4254 new_gen = folio_inc_gen(lruvec, folio, false); in inc_min_seq()
4262 reset_ctrl_pos(lruvec, type, true); in inc_min_seq()
4268 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) in try_to_inc_min_seq() argument
4272 struct lru_gen_struct *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
4273 DEFINE_MIN_SEQ(lruvec); in try_to_inc_min_seq()
4275 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); in try_to_inc_min_seq()
4303 reset_ctrl_pos(lruvec, type, true); in try_to_inc_min_seq()
4311 static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan) in inc_max_seq() argument
4315 struct lru_gen_struct *lrugen = &lruvec->lrugen; in inc_max_seq()
4317 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
4319 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); in inc_max_seq()
4322 if (get_nr_gens(lruvec, type) != MAX_NR_GENS) in inc_max_seq()
4327 while (!inc_min_seq(lruvec, type, can_swap)) { in inc_max_seq()
4328 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4330 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
4352 __update_lru_size(lruvec, lru, zone, delta); in inc_max_seq()
4353 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
4358 reset_ctrl_pos(lruvec, type, false); in inc_max_seq()
4364 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4367 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, in try_to_inc_max_seq() argument
4373 struct lru_gen_struct *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
4378 if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) { in try_to_inc_max_seq()
4390 success = iterate_mm_list_nowalk(lruvec, max_seq); in try_to_inc_max_seq()
4396 success = iterate_mm_list_nowalk(lruvec, max_seq); in try_to_inc_max_seq()
4400 walk->lruvec = lruvec; in try_to_inc_max_seq()
4406 success = iterate_mm_list(lruvec, walk, &mm); in try_to_inc_max_seq()
4408 walk_mm(lruvec, mm, walk); in try_to_inc_max_seq()
4415 wait_event_killable(lruvec->mm_state.wait, in try_to_inc_max_seq()
4423 inc_max_seq(lruvec, can_swap, force_scan); in try_to_inc_max_seq()
4425 if (wq_has_sleeper(&lruvec->mm_state.wait)) in try_to_inc_max_seq()
4426 wake_up_all(&lruvec->mm_state.wait); in try_to_inc_max_seq()
4431 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq, in should_run_aging() argument
4438 struct lru_gen_struct *lrugen = &lruvec->lrugen; in should_run_aging()
4439 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in should_run_aging()
4488 static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl) in age_lruvec() argument
4492 int swappiness = get_swappiness(lruvec, sc); in age_lruvec()
4493 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in age_lruvec()
4494 DEFINE_MAX_SEQ(lruvec); in age_lruvec()
4495 DEFINE_MIN_SEQ(lruvec); in age_lruvec()
4504 need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan); in age_lruvec()
4508 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in age_lruvec()
4519 try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false); in age_lruvec()
4552 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_age_node() local
4554 if (age_lruvec(lruvec, sc, min_ttl)) in lru_gen_age_node()
4602 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_look_around() local
4603 DEFINE_MAX_SEQ(lruvec); in lru_gen_look_around()
4670 update_bloom_filter(lruvec, max_seq, pvmw->pmd); in lru_gen_look_around()
4685 spin_lock_irq(&lruvec->lru_lock); in lru_gen_look_around()
4686 new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq); in lru_gen_look_around()
4701 lru_gen_update_size(lruvec, folio, old_gen, new_gen); in lru_gen_look_around()
4705 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_look_around()
4714 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx) in sort_folio() argument
4723 struct lru_gen_struct *lrugen = &lruvec->lrugen; in sort_folio()
4729 success = lru_gen_del_folio(lruvec, folio, true); in sort_folio()
4732 lruvec_add_folio(lruvec, folio); in sort_folio()
4739 success = lru_gen_del_folio(lruvec, folio, true); in sort_folio()
4742 lruvec_add_folio_tail(lruvec, folio); in sort_folio()
4756 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4761 __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); in sort_folio()
4768 gen = folio_inc_gen(lruvec, folio, true); in sort_folio()
4776 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) in isolate_folio() argument
4808 success = lru_gen_del_folio(lruvec, folio, true); in isolate_folio()
4814 static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, in scan_folios() argument
4823 struct lru_gen_struct *lrugen = &lruvec->lrugen; in scan_folios()
4824 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in scan_folios()
4828 if (get_nr_gens(lruvec, type) == MIN_NR_GENS) in scan_folios()
4849 if (sort_folio(lruvec, folio, tier)) in scan_folios()
4851 else if (isolate_folio(lruvec, folio, sc)) { in scan_folios()
4889 static int get_tier_idx(struct lruvec *lruvec, int type) in get_tier_idx() argument
4899 read_ctrl_pos(lruvec, type, 0, 1, &sp); in get_tier_idx()
4901 read_ctrl_pos(lruvec, type, tier, 2, &pv); in get_tier_idx()
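
read_ctrl_pos() feeds refault statistics into a feedback comparison; get_tier_idx() (matches 4889-4901) weighs tier 0 against each higher tier with a 1:2 gain and returns the highest tier still worth evicting, so sort_folio() (match 4756) protects everything above it. A sketch of the loop (positive_ctrl_err() is the comparison helper in the v6.1 source):

    static int get_tier_idx(struct lruvec *lruvec, int type)
    {
            int tier;
            struct ctrl_pos sp, pv;

            /* compare tier 0 (sp) against each higher tier (pv) at 1:2 gain */
            read_ctrl_pos(lruvec, type, 0, 1, &sp);
            for (tier = 1; tier < MAX_NR_TIERS; tier++) {
                    read_ctrl_pos(lruvec, type, tier, 2, &pv);
                    if (!positive_ctrl_err(&sp, &pv))
                            break;
            }

            return tier - 1;
    }
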
4909 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) in get_type_to_scan() argument
4921 read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); in get_type_to_scan()
4922 read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); in get_type_to_scan()
4925 read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); in get_type_to_scan()
4927 read_ctrl_pos(lruvec, type, tier, gain[type], &pv); in get_type_to_scan()
4937 static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, in isolate_folios() argument
4944 DEFINE_MIN_SEQ(lruvec); in isolate_folios()
4960 type = get_type_to_scan(lruvec, swappiness, &tier); in isolate_folios()
4964 tier = get_tier_idx(lruvec, type); in isolate_folios()
4966 scanned = scan_folios(lruvec, sc, type, tier, list); in isolate_folios()
4979 static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, in evict_folios() argument
4993 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in evict_folios()
4994 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in evict_folios()
4996 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4998 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list); in evict_folios()
5000 scanned += try_to_inc_min_seq(lruvec, swappiness); in evict_folios()
5002 if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) in evict_folios()
5005 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
5042 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
5044 move_folios_to_lru(lruvec, &list); in evict_folios()
5048 reset_batch_size(lruvec, walk); in evict_folios()
5056 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
5080 static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, in get_nr_to_scan() argument
5084 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in get_nr_to_scan()
5085 DEFINE_MAX_SEQ(lruvec); in get_nr_to_scan()
5086 DEFINE_MIN_SEQ(lruvec); in get_nr_to_scan()
5092 *need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan); in get_nr_to_scan()
5104 if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false)) in get_nr_to_scan()
5110 static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq, in should_abort_scan() argument
5114 DEFINE_MAX_SEQ(lruvec); in should_abort_scan()
5148 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
5163 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) in lru_gen_shrink_lruvec() argument
5170 DEFINE_MAX_SEQ(lruvec); in lru_gen_shrink_lruvec()
5176 set_mm_walk(lruvec_pgdat(lruvec)); in lru_gen_shrink_lruvec()
5184 swappiness = get_swappiness(lruvec, sc); in lru_gen_shrink_lruvec()
5185 else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc)) in lru_gen_shrink_lruvec()
5190 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging); in lru_gen_shrink_lruvec()
5194 delta = evict_folios(lruvec, sc, swappiness, &need_swapping); in lru_gen_shrink_lruvec()
5202 if (should_abort_scan(lruvec, max_seq, sc, need_swapping)) in lru_gen_shrink_lruvec()
5221 static bool __maybe_unused state_is_valid(struct lruvec *lruvec) in state_is_valid() argument
5223 struct lru_gen_struct *lrugen = &lruvec->lrugen; in state_is_valid()
5229 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
5244 static bool fill_evictable(struct lruvec *lruvec) in fill_evictable() argument
5252 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
5263 lruvec_del_folio(lruvec, folio); in fill_evictable()
5264 success = lru_gen_add_folio(lruvec, folio, false); in fill_evictable()
5275 static bool drain_evictable(struct lruvec *lruvec) in drain_evictable() argument
5281 struct list_head *head = &lruvec->lrugen.lists[gen][type][zone]; in drain_evictable()
5292 success = lru_gen_del_folio(lruvec, folio, false); in drain_evictable()
5294 lruvec_add_folio(lruvec, folio); in drain_evictable()
5328 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_change_state() local
5330 if (!lruvec) in lru_gen_change_state()
5333 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5335 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); in lru_gen_change_state()
5336 VM_WARN_ON_ONCE(!state_is_valid(lruvec)); in lru_gen_change_state()
5338 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5340 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) { in lru_gen_change_state()
5341 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5343 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5346 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
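
Matches 5328-5346 show how flipping the MGLRU enabled state migrates every folio between the classic lists and the generation lists without monopolizing lru_lock: fill_evictable()/drain_evictable() move a bounded batch per call and the caller loops, dropping the lock between rounds. The retry loop, with the cond_resched() call (not itself a lruvec match) filled in as an assumption:

    while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
            spin_unlock_irq(&lruvec->lru_lock);
            cond_resched();
            spin_lock_irq(&lruvec->lru_lock);
    }
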
5498 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, in lru_gen_seq_show_full() argument
5505 struct lru_gen_struct *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5538 n = READ_ONCE(lruvec->mm_state.stats[hist][i]); in lru_gen_seq_show_full()
5541 n = READ_ONCE(lruvec->mm_state.stats[hist][i]); in lru_gen_seq_show_full()
5554 struct lruvec *lruvec = v; in lru_gen_seq_show() local
5555 struct lru_gen_struct *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5556 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5557 struct mem_cgroup *memcg = lruvec_memcg(lruvec); in lru_gen_seq_show()
5558 DEFINE_MAX_SEQ(lruvec); in lru_gen_seq_show()
5559 DEFINE_MIN_SEQ(lruvec); in lru_gen_seq_show()
5583 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5600 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); in lru_gen_seq_show()
5613 static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, in run_aging() argument
5616 DEFINE_MAX_SEQ(lruvec); in run_aging()
5617 DEFINE_MIN_SEQ(lruvec); in run_aging()
5628 try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan); in run_aging()
5633 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, in run_eviction() argument
5636 DEFINE_MAX_SEQ(lruvec); in run_eviction()
5644 DEFINE_MIN_SEQ(lruvec); in run_eviction()
5652 if (!evict_folios(lruvec, sc, swappiness, NULL)) in run_eviction()
5664 struct lruvec *lruvec; in run_cmd() local
5687 lruvec = get_lruvec(memcg, nid); in run_cmd()
5690 swappiness = get_swappiness(lruvec, sc); in run_cmd()
5696 err = run_aging(lruvec, seq, sc, swappiness, opt); in run_cmd()
5699 err = run_eviction(lruvec, seq, sc, swappiness, opt); in run_cmd()
5805 void lru_gen_init_lruvec(struct lruvec *lruvec) in lru_gen_init_lruvec() argument
5809 struct lru_gen_struct *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
5820 lruvec->mm_state.seq = MIN_NR_GENS; in lru_gen_init_lruvec()
5821 init_waitqueue_head(&lruvec->mm_state.wait); in lru_gen_init_lruvec()
5837 struct lruvec *lruvec = get_lruvec(memcg, nid); in lru_gen_exit_memcg() local
5839 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
5840 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
5843 bitmap_free(lruvec->mm_state.filters[i]); in lru_gen_exit_memcg()
5844 lruvec->mm_state.filters[i] = NULL; in lru_gen_exit_memcg()
5871 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) in lru_gen_shrink_lruvec() argument
5877 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) in shrink_lruvec() argument
5889 lru_gen_shrink_lruvec(lruvec, sc); in shrink_lruvec()
5893 get_scan_count(lruvec, sc, nr); in shrink_lruvec()
5924 lruvec, sc); in shrink_lruvec()
5989 if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) && in shrink_lruvec()
5990 inactive_is_low(lruvec, LRU_INACTIVE_ANON)) in shrink_lruvec()
5991 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, in shrink_lruvec()
6073 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_node_memcgs() local
6110 shrink_lruvec(lruvec, sc); in shrink_node_memcgs()
6128 struct lruvec *target_lruvec; in shrink_node()
6394 struct lruvec *target_lruvec; in snapshot_refaults()
6467 struct lruvec *lruvec; in do_try_to_free_pages() local
6469 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6471 clear_bit(LRUVEC_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6692 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in mem_cgroup_shrink_node() local
6717 shrink_lruvec(lruvec, &sc); in mem_cgroup_shrink_node()
6769 struct lruvec *lruvec; in kswapd_age_node() local
6779 lruvec = mem_cgroup_lruvec(NULL, pgdat); in kswapd_age_node()
6780 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) in kswapd_age_node()
6785 lruvec = mem_cgroup_lruvec(memcg, pgdat); in kswapd_age_node()
6786 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, in kswapd_age_node()
6858 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); in clear_pgdat_congested() local
6860 clear_bit(LRUVEC_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
7731 struct lruvec *lruvec = NULL; in check_move_unevictable_folios() local
7746 lruvec = folio_lruvec_relock_irq(folio, lruvec); in check_move_unevictable_folios()
7748 lruvec_del_folio(lruvec, folio); in check_move_unevictable_folios()
7750 lruvec_add_folio(lruvec, folio); in check_move_unevictable_folios()
7756 if (lruvec) { in check_move_unevictable_folios()
7759 unlock_page_lruvec_irq(lruvec); in check_move_unevictable_folios()
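
The final group (matches 7731-7759) relies on folio_lruvec_relock_irq() to batch locking across a folio batch: the lruvec lock is dropped and retaken only when consecutive folios belong to different lruvecs. A hedged reconstruction of the function body (the LRU-flag handling and the event counters are filled in from the v6.1 source):

    void check_move_unevictable_folios(struct folio_batch *fbatch)
    {
            struct lruvec *lruvec = NULL;
            int pgscanned = 0;
            int pgrescued = 0;
            int i;

            for (i = 0; i < fbatch->nr; i++) {
                    struct folio *folio = fbatch->folios[i];
                    int nr_pages = folio_nr_pages(folio);

                    pgscanned += nr_pages;

                    /* block memcg migration while the folio moves between lists */
                    if (!folio_test_clear_lru(folio))
                            continue;

                    lruvec = folio_lruvec_relock_irq(folio, lruvec);
                    if (folio_evictable(folio) && folio_test_unevictable(folio)) {
                            lruvec_del_folio(lruvec, folio);
                            folio_clear_unevictable(folio);
                            lruvec_add_folio(lruvec, folio);
                            pgrescued += nr_pages;
                    }
                    folio_set_lru(folio);
            }

            if (lruvec) {
                    __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
                    __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
                    unlock_page_lruvec_irq(lruvec);
            }
    }
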