Lines matching refs:folio (references to the folio identifier in mm/swap.c)

80 static void __page_cache_release(struct folio *folio) in __page_cache_release() argument
82 if (folio_test_lru(folio)) { in __page_cache_release()
86 lruvec = folio_lruvec_lock_irqsave(folio, &flags); in __page_cache_release()
87 lruvec_del_folio(lruvec, folio); in __page_cache_release()
88 __folio_clear_lru_flags(folio); in __page_cache_release()
92 if (unlikely(folio_test_mlocked(folio))) { in __page_cache_release()
93 long nr_pages = folio_nr_pages(folio); in __page_cache_release()
95 __folio_clear_mlocked(folio); in __page_cache_release()
96 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); in __page_cache_release()
101 static void __folio_put_small(struct folio *folio) in __folio_put_small() argument
103 __page_cache_release(folio); in __folio_put_small()
104 mem_cgroup_uncharge(folio); in __folio_put_small()
105 free_unref_page(&folio->page, 0); in __folio_put_small()
108 static void __folio_put_large(struct folio *folio) in __folio_put_large() argument
116 if (!folio_test_hugetlb(folio)) in __folio_put_large()
117 __page_cache_release(folio); in __folio_put_large()
118 destroy_large_folio(folio); in __folio_put_large()
121 void __folio_put(struct folio *folio) in __folio_put() argument
123 if (unlikely(folio_is_zone_device(folio))) in __folio_put()
124 free_zone_device_page(&folio->page); in __folio_put()
125 else if (unlikely(folio_test_large(folio))) in __folio_put()
126 __folio_put_large(folio); in __folio_put()
128 __folio_put_small(folio); in __folio_put()
140 struct folio *folio, *next; in put_pages_list() local
142 list_for_each_entry_safe(folio, next, pages, lru) { in put_pages_list()
143 if (!folio_put_testzero(folio)) { in put_pages_list()
144 list_del(&folio->lru); in put_pages_list()
147 if (folio_test_large(folio)) { in put_pages_list()
148 list_del(&folio->lru); in put_pages_list()
149 __folio_put_large(folio); in put_pages_list()
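put_pages_list() above frees a folio only when folio_put_testzero() reports that the reference just dropped was the last one. Below is a minimal standalone sketch of that drop-to-zero pattern using C11 atomics; struct obj and obj_put_testzero() are illustrative names, not kernel API.

/*
 * Standalone sketch (not kernel code) of the "put_testzero" idea seen in
 * put_pages_list() above: drop one reference and free the object only if
 * that drop took the count to zero.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;
        int payload;
};

static struct obj *obj_new(int payload)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcount, 1);
        o->payload = payload;
        return o;
}

/* Returns true if this call released the last reference. */
static int obj_put_testzero(struct obj *o)
{
        return atomic_fetch_sub(&o->refcount, 1) == 1;
}

int main(void)
{
        struct obj *o = obj_new(42);

        atomic_fetch_add(&o->refcount, 1);      /* take a second reference */

        if (obj_put_testzero(o))                /* drops to 1: not freed */
                free(o);
        if (obj_put_testzero(o)) {              /* drops to 0: freed */
                printf("freeing payload %d\n", o->payload);
                free(o);
        }
        return 0;
}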
190 typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
192 static void lru_add_fn(struct lruvec *lruvec, struct folio *folio) in lru_add_fn() argument
194 int was_unevictable = folio_test_clear_unevictable(folio); in lru_add_fn()
195 long nr_pages = folio_nr_pages(folio); in lru_add_fn()
197 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in lru_add_fn()
210 if (folio_evictable(folio)) { in lru_add_fn()
214 folio_clear_active(folio); in lru_add_fn()
215 folio_set_unevictable(folio); in lru_add_fn()
223 folio->mlock_count = 0; in lru_add_fn()
228 lruvec_add_folio(lruvec, folio); in lru_add_fn()
229 trace_mm_lru_insertion(folio); in lru_add_fn()
239 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru() local
242 if (move_fn != lru_add_fn && !folio_test_clear_lru(folio)) in folio_batch_move_lru()
245 lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags); in folio_batch_move_lru()
246 move_fn(lruvec, folio); in folio_batch_move_lru()
248 folio_set_lru(folio); in folio_batch_move_lru()
258 struct folio *folio, move_fn_t move_fn) in folio_batch_add_and_move() argument
260 if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) && in folio_batch_add_and_move()
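folio_batch_add_and_move() above defers LRU work by stashing folios in a small batch and handing the whole batch to folio_batch_move_lru() once it fills up (large folios are flushed immediately, per the !folio_test_large() check). A standalone sketch of that batch-then-flush shape follows; BATCH_SIZE, struct batch and the plain int items are placeholders rather than the kernel's folio_batch.

/*
 * Standalone sketch of the batch-then-flush pattern behind
 * folio_batch_add_and_move()/folio_batch_move_lru() above: items are
 * buffered in a small fixed-size array and a per-item callback runs
 * over the whole array once it fills.
 */
#include <stdio.h>

#define BATCH_SIZE 15

struct batch {
        unsigned int nr;
        int items[BATCH_SIZE];
};

typedef void (*move_fn_t)(int item);

/* Returns the space left after adding, 0 when the batch is now full. */
static unsigned int batch_add(struct batch *b, int item)
{
        b->items[b->nr++] = item;
        return BATCH_SIZE - b->nr;
}

static void batch_move(struct batch *b, move_fn_t move_fn)
{
        for (unsigned int i = 0; i < b->nr; i++)
                move_fn(b->items[i]);
        b->nr = 0;
}

static void move_one(int item)
{
        printf("moving %d\n", item);
}

static void batch_add_and_move(struct batch *b, int item, move_fn_t move_fn)
{
        if (batch_add(b, item))         /* room left: defer the work */
                return;
        batch_move(b, move_fn);         /* full: process everything */
}

int main(void)
{
        struct batch b = { .nr = 0 };

        for (int i = 0; i < 40; i++)
                batch_add_and_move(&b, i, move_one);
        batch_move(&b, move_one);       /* drain the partial batch */
        return 0;
}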
266 static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio) in lru_move_tail_fn() argument
268 if (!folio_test_unevictable(folio)) { in lru_move_tail_fn()
269 lruvec_del_folio(lruvec, folio); in lru_move_tail_fn()
270 folio_clear_active(folio); in lru_move_tail_fn()
271 lruvec_add_folio_tail(lruvec, folio); in lru_move_tail_fn()
272 __count_vm_events(PGROTATED, folio_nr_pages(folio)); in lru_move_tail_fn()
283 void folio_rotate_reclaimable(struct folio *folio) in folio_rotate_reclaimable() argument
285 if (!folio_test_locked(folio) && !folio_test_dirty(folio) && in folio_rotate_reclaimable()
286 !folio_test_unevictable(folio) && folio_test_lru(folio)) { in folio_rotate_reclaimable()
290 folio_get(folio); in folio_rotate_reclaimable()
293 folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn); in folio_rotate_reclaimable()
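folio_rotate_reclaimable() and lru_move_tail_fn() above move a folio whose writeback has completed to the tail of its LRU list so reclaim encounters it first. Here is a self-contained sketch of that move-to-tail step on a circular doubly linked list; node_del() and node_add_tail() are local helpers modelled loosely on the kernel's list primitives, not the real <linux/list.h> API.

/*
 * Standalone sketch of the "rotate to the tail" idea behind
 * lru_move_tail_fn()/folio_rotate_reclaimable() above: a node is
 * unlinked from a circular doubly linked list and re-linked at the
 * tail, so it becomes the next candidate to be taken off the list.
 */
#include <stdio.h>

struct node {
        struct node *prev, *next;
        int id;
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void node_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void node_add_tail(struct node *head, struct node *n)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

int main(void)
{
        struct node head, a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        list_init(&head);
        node_add_tail(&head, &a);
        node_add_tail(&head, &b);
        node_add_tail(&head, &c);

        node_del(&a);                   /* rotate node 1 ... */
        node_add_tail(&head, &a);       /* ... to the tail of the list */

        for (struct node *n = head.next; n != &head; n = n->next)
                printf("%d ", n->id);   /* prints: 2 3 1 */
        printf("\n");
        return 0;
}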
338 void lru_note_cost_folio(struct folio *folio) in lru_note_cost_folio() argument
340 lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio), in lru_note_cost_folio()
341 folio_nr_pages(folio)); in lru_note_cost_folio()
344 static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio) in folio_activate_fn() argument
346 if (!folio_test_active(folio) && !folio_test_unevictable(folio)) { in folio_activate_fn()
347 long nr_pages = folio_nr_pages(folio); in folio_activate_fn()
349 lruvec_del_folio(lruvec, folio); in folio_activate_fn()
350 folio_set_active(folio); in folio_activate_fn()
351 lruvec_add_folio(lruvec, folio); in folio_activate_fn()
352 trace_mm_lru_activate(folio); in folio_activate_fn()
369 void folio_activate(struct folio *folio) in folio_activate() argument
371 if (folio_test_lru(folio) && !folio_test_active(folio) && in folio_activate()
372 !folio_test_unevictable(folio)) { in folio_activate()
375 folio_get(folio); in folio_activate()
378 folio_batch_add_and_move(fbatch, folio, folio_activate_fn); in folio_activate()
388 void folio_activate(struct folio *folio) in folio_activate() argument
392 if (folio_test_clear_lru(folio)) { in folio_activate()
393 lruvec = folio_lruvec_lock_irq(folio); in folio_activate()
394 folio_activate_fn(lruvec, folio); in folio_activate()
396 folio_set_lru(folio); in folio_activate()
401 static void __lru_cache_activate_folio(struct folio *folio) in __lru_cache_activate_folio() argument
420 struct folio *batch_folio = fbatch->folios[i]; in __lru_cache_activate_folio()
422 if (batch_folio == folio) { in __lru_cache_activate_folio()
423 folio_set_active(folio); in __lru_cache_activate_folio()
432 static void folio_inc_refs(struct folio *folio) in folio_inc_refs() argument
434 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_refs()
436 if (folio_test_unevictable(folio)) in folio_inc_refs()
439 if (!folio_test_referenced(folio)) { in folio_inc_refs()
440 folio_set_referenced(folio); in folio_inc_refs()
444 if (!folio_test_workingset(folio)) { in folio_inc_refs()
445 folio_set_workingset(folio); in folio_inc_refs()
457 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_refs()
460 static void folio_inc_refs(struct folio *folio) in folio_inc_refs() argument
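folio_inc_refs() above ends in a try_cmpxchg() retry loop on folio->flags: read the flags word, compute an updated value, and retry if another CPU changed the word in the meantime. The sketch below reproduces only the two flag-setting steps visible in the listing, with C11 atomics and made-up FLAG_* bits standing in for the real page flags.

/*
 * Standalone sketch of the lock-free flag update in folio_inc_refs()
 * above: load the flags word, compute the new value, and retry with a
 * compare-and-exchange until no other thread raced with us.
 */
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_REFERENCED (1UL << 0)
#define FLAG_WORKINGSET (1UL << 1)

static _Atomic unsigned long flags;

static void inc_refs(void)
{
        unsigned long new_flags, old_flags = atomic_load(&flags);

        do {
                if (!(old_flags & FLAG_REFERENCED))
                        new_flags = old_flags | FLAG_REFERENCED;
                else if (!(old_flags & FLAG_WORKINGSET))
                        new_flags = old_flags | FLAG_WORKINGSET;
                else
                        return;         /* nothing left to record */
                /*
                 * compare_exchange_weak() refreshes old_flags on failure,
                 * so the next iteration recomputes against the new value.
                 */
        } while (!atomic_compare_exchange_weak(&flags, &old_flags, new_flags));
}

int main(void)
{
        inc_refs();
        inc_refs();
        printf("flags = %#lx\n", atomic_load(&flags));
        return 0;
}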
475 void folio_mark_accessed(struct folio *folio) in folio_mark_accessed() argument
478 folio_inc_refs(folio); in folio_mark_accessed()
482 if (!folio_test_referenced(folio)) { in folio_mark_accessed()
483 folio_set_referenced(folio); in folio_mark_accessed()
484 } else if (folio_test_unevictable(folio)) { in folio_mark_accessed()
490 } else if (!folio_test_active(folio)) { in folio_mark_accessed()
497 if (folio_test_lru(folio)) in folio_mark_accessed()
498 folio_activate(folio); in folio_mark_accessed()
500 __lru_cache_activate_folio(folio); in folio_mark_accessed()
501 folio_clear_referenced(folio); in folio_mark_accessed()
502 workingset_activation(folio); in folio_mark_accessed()
504 if (folio_test_idle(folio)) in folio_mark_accessed()
505 folio_clear_idle(folio); in folio_mark_accessed()
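The folio_mark_accessed() lines above show a two-touch promotion (the folio_inc_refs() call at line 478 presumably covers the multi-gen LRU case): the first access only sets the referenced bit, and a later access activates the folio and clears the bit again. A toy sketch of that policy, with plain bools in place of the page flags:

/*
 * Standalone sketch of the two-touch promotion visible in
 * folio_mark_accessed() above: the first access only marks the page
 * referenced, the second access promotes it to active and clears the
 * referenced bit so counting starts over.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_state {
        bool referenced;
        bool active;
};

static void mark_accessed(struct page_state *p)
{
        if (!p->referenced) {
                p->referenced = true;           /* first touch: remember it */
        } else if (!p->active) {
                p->active = true;               /* second touch: activate */
                p->referenced = false;          /* start counting again */
        }
}

int main(void)
{
        struct page_state p = { 0 };

        mark_accessed(&p);
        printf("after 1st touch: referenced=%d active=%d\n", p.referenced, p.active);
        mark_accessed(&p);
        printf("after 2nd touch: referenced=%d active=%d\n", p.referenced, p.active);
        return 0;
}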
518 void folio_add_lru(struct folio *folio) in folio_add_lru() argument
522 VM_BUG_ON_FOLIO(folio_test_active(folio) && in folio_add_lru()
523 folio_test_unevictable(folio), folio); in folio_add_lru()
524 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in folio_add_lru()
527 if (lru_gen_enabled() && !folio_test_unevictable(folio) && in folio_add_lru()
529 folio_set_active(folio); in folio_add_lru()
531 folio_get(folio); in folio_add_lru()
534 folio_batch_add_and_move(fbatch, folio, lru_add_fn); in folio_add_lru()
547 void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma) in folio_add_lru_vma() argument
549 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in folio_add_lru_vma()
552 mlock_new_page(&folio->page); in folio_add_lru_vma()
554 folio_add_lru(folio); in folio_add_lru_vma()
578 static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio) in lru_deactivate_file_fn() argument
580 bool active = folio_test_active(folio); in lru_deactivate_file_fn()
581 long nr_pages = folio_nr_pages(folio); in lru_deactivate_file_fn()
583 if (folio_test_unevictable(folio)) in lru_deactivate_file_fn()
587 if (folio_mapped(folio)) in lru_deactivate_file_fn()
590 lruvec_del_folio(lruvec, folio); in lru_deactivate_file_fn()
591 folio_clear_active(folio); in lru_deactivate_file_fn()
592 folio_clear_referenced(folio); in lru_deactivate_file_fn()
594 if (folio_test_writeback(folio) || folio_test_dirty(folio)) { in lru_deactivate_file_fn()
601 lruvec_add_folio(lruvec, folio); in lru_deactivate_file_fn()
602 folio_set_reclaim(folio); in lru_deactivate_file_fn()
608 lruvec_add_folio_tail(lruvec, folio); in lru_deactivate_file_fn()
619 static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio) in lru_deactivate_fn() argument
621 if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) { in lru_deactivate_fn()
622 long nr_pages = folio_nr_pages(folio); in lru_deactivate_fn()
624 lruvec_del_folio(lruvec, folio); in lru_deactivate_fn()
625 folio_clear_active(folio); in lru_deactivate_fn()
626 folio_clear_referenced(folio); in lru_deactivate_fn()
627 lruvec_add_folio(lruvec, folio); in lru_deactivate_fn()
635 static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio) in lru_lazyfree_fn() argument
637 if (folio_test_anon(folio) && folio_test_swapbacked(folio) && in lru_lazyfree_fn()
638 !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) { in lru_lazyfree_fn()
639 long nr_pages = folio_nr_pages(folio); in lru_lazyfree_fn()
641 lruvec_del_folio(lruvec, folio); in lru_lazyfree_fn()
642 folio_clear_active(folio); in lru_lazyfree_fn()
643 folio_clear_referenced(folio); in lru_lazyfree_fn()
649 folio_clear_swapbacked(folio); in lru_lazyfree_fn()
650 lruvec_add_folio(lruvec, folio); in lru_lazyfree_fn()
707 void deactivate_file_folio(struct folio *folio) in deactivate_file_folio() argument
712 if (folio_test_unevictable(folio)) in deactivate_file_folio()
715 folio_get(folio); in deactivate_file_folio()
718 folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn); in deactivate_file_folio()
732 struct folio *folio = page_folio(page); in deactivate_page() local
734 if (folio_test_lru(folio) && !folio_test_unevictable(folio) && in deactivate_page()
735 (folio_test_active(folio) || lru_gen_enabled())) { in deactivate_page()
738 folio_get(folio); in deactivate_page()
741 folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn); in deactivate_page()
755 struct folio *folio = page_folio(page); in mark_page_lazyfree() local
757 if (folio_test_lru(folio) && folio_test_anon(folio) && in mark_page_lazyfree()
758 folio_test_swapbacked(folio) && !folio_test_swapcache(folio) && in mark_page_lazyfree()
759 !folio_test_unevictable(folio)) { in mark_page_lazyfree()
762 folio_get(folio); in mark_page_lazyfree()
765 folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn); in mark_page_lazyfree()
986 struct folio *folio = page_folio(pages[i]); in release_pages() local
998 if (is_huge_zero_page(&folio->page)) in release_pages()
1001 if (folio_is_zone_device(folio)) { in release_pages()
1006 if (put_devmap_managed_page(&folio->page)) in release_pages()
1008 if (folio_put_testzero(folio)) in release_pages()
1009 free_zone_device_page(&folio->page); in release_pages()
1013 if (!folio_put_testzero(folio)) in release_pages()
1016 if (folio_test_large(folio)) { in release_pages()
1021 __folio_put_large(folio); in release_pages()
1025 if (folio_test_lru(folio)) { in release_pages()
1028 lruvec = folio_lruvec_relock_irqsave(folio, lruvec, in release_pages()
1033 lruvec_del_folio(lruvec, folio); in release_pages()
1034 __folio_clear_lru_flags(folio); in release_pages()
1043 if (unlikely(folio_test_mlocked(folio))) { in release_pages()
1044 __folio_clear_mlocked(folio); in release_pages()
1045 zone_stat_sub_folio(folio, NR_MLOCK); in release_pages()
1049 list_add(&folio->lru, &pages_to_free); in release_pages()
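release_pages() above walks an arbitrary mix of folios and relies on folio_lruvec_relock_irqsave() so the lruvec lock is dropped and retaken only when the next folio belongs to a different lruvec. The following standalone sketch shows that relock-on-change pattern with pthread mutexes; struct container, struct item and release_items() are invented for the example.

/*
 * Standalone sketch of the lock-batching pattern behind
 * folio_lruvec_relock_irqsave() in release_pages() above: while walking
 * a list of items, the per-container lock is only dropped and retaken
 * when the next item belongs to a different container.
 */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct container {
        pthread_mutex_t lock;
        int removed;
};

struct item {
        struct container *owner;
};

static void release_items(struct item *items, size_t n)
{
        struct container *locked = NULL;

        for (size_t i = 0; i < n; i++) {
                struct container *owner = items[i].owner;

                if (owner != locked) {          /* relock only on a change */
                        if (locked)
                                pthread_mutex_unlock(&locked->lock);
                        pthread_mutex_lock(&owner->lock);
                        locked = owner;
                }
                owner->removed++;               /* work done under the lock */
        }
        if (locked)
                pthread_mutex_unlock(&locked->lock);
}

int main(void)
{
        struct container a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct container b = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct item items[] = { { &a }, { &a }, { &b }, { &b }, { &a } };

        release_items(items, 5);
        printf("a: %d removed, b: %d removed\n", a.removed, b.removed);
        return 0;
}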
1094 struct folio *folio = fbatch->folios[i]; in folio_batch_remove_exceptionals() local
1095 if (!xa_is_value(folio)) in folio_batch_remove_exceptionals()
1096 fbatch->folios[j++] = folio; in folio_batch_remove_exceptionals()
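folio_batch_remove_exceptionals() above compacts the batch in place, copying forward only the entries that are not xa_is_value() sentinels and shrinking the count. A minimal sketch of the same single-pass compaction, with NULL standing in for the sentinel entries:

/*
 * Standalone sketch of the in-place compaction done by
 * folio_batch_remove_exceptionals() above: walk the array once, keep
 * only the real entries, and shrink the count to match.
 */
#include <stdio.h>

#define BATCH_SIZE 15

struct batch {
        unsigned int nr;
        const char *entries[BATCH_SIZE];
};

static void batch_remove_exceptionals(struct batch *b)
{
        unsigned int i, j;

        for (i = 0, j = 0; i < b->nr; i++) {
                const char *entry = b->entries[i];

                if (entry)                      /* keep only real entries */
                        b->entries[j++] = entry;
        }
        b->nr = j;
}

int main(void)
{
        struct batch b = { 4, { "folio0", NULL, "folio2", NULL } };

        batch_remove_exceptionals(&b);
        printf("%u entries kept, first is %s\n", b.nr, b.entries[0]);
        return 0;
}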