Lines Matching refs:folio

62 	struct folio *folio = folio_get_nontail_page(page);  in isolate_movable_page()  local
74 if (!folio) in isolate_movable_page()
77 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
86 if (unlikely(!__folio_test_movable(folio))) in isolate_movable_page()
90 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
104 if (unlikely(!folio_trylock(folio))) in isolate_movable_page()
107 if (!folio_test_movable(folio) || folio_test_isolated(folio)) in isolate_movable_page()
110 mops = folio_movable_ops(folio); in isolate_movable_page()
111 VM_BUG_ON_FOLIO(!mops, folio); in isolate_movable_page()
113 if (!mops->isolate_page(&folio->page, mode)) in isolate_movable_page()
117 WARN_ON_ONCE(folio_test_isolated(folio)); in isolate_movable_page()
118 folio_set_isolated(folio); in isolate_movable_page()
119 folio_unlock(folio); in isolate_movable_page()
124 folio_unlock(folio); in isolate_movable_page()
126 folio_put(folio); in isolate_movable_page()
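
The matches above (source lines 62-126) all come from isolate_movable_page() in mm/migrate.c. Read together they give the isolation flow for driver-movable pages: pin the folio, reject slab and non-movable folios, trylock, and hand the page to its driver's movable_operations. A condensed sketch of that flow, reconstructed from the matched lines rather than copied verbatim (error labels and some re-checks are simplified); this and the later sketches assume the kernel-internal context of mm/migrate.c, they are not standalone programs:

static bool isolate_movable_page_sketch(struct page *page, isolate_mode_t mode)
{
	const struct movable_operations *mops;
	struct folio *folio = folio_get_nontail_page(page);	/* line 62 */

	if (!folio)
		return false;			/* page was freed under us */
	if (unlikely(folio_test_slab(folio)))
		goto out_put;			/* slab reuses the same bits */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_put;			/* not a driver-movable page */
	if (unlikely(!folio_trylock(folio)))
		goto out_put;			/* never block here */
	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_unlock;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);
	if (!mops->isolate_page(&folio->page, mode))	/* driver decides */
		goto out_unlock;

	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);
	return true;				/* caller keeps the reference */

out_unlock:
	folio_unlock(folio);
out_put:
	folio_put(folio);
	return false;
}
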
131 static void putback_movable_folio(struct folio *folio) in putback_movable_folio() argument
133 const struct movable_operations *mops = folio_movable_ops(folio); in putback_movable_folio()
135 mops->putback_page(&folio->page); in putback_movable_folio()
136 folio_clear_isolated(folio); in putback_movable_folio()
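
Source lines 131-136 are the inverse helper: putback_movable_folio() returns an isolated folio to the driver that owns it. A minimal sketch, essentially what the two matched lines imply:

static void putback_movable_folio_sketch(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);	/* driver takes the page back */
	folio_clear_isolated(folio);		/* no longer isolated for migration */
}
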
149 struct folio *folio; in putback_movable_pages() local
150 struct folio *folio2; in putback_movable_pages()
152 list_for_each_entry_safe(folio, folio2, l, lru) { in putback_movable_pages()
153 if (unlikely(folio_test_hugetlb(folio))) { in putback_movable_pages()
154 folio_putback_active_hugetlb(folio); in putback_movable_pages()
157 list_del(&folio->lru); in putback_movable_pages()
163 if (unlikely(__folio_test_movable(folio))) { in putback_movable_pages()
164 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); in putback_movable_pages()
165 folio_lock(folio); in putback_movable_pages()
166 if (folio_test_movable(folio)) in putback_movable_pages()
167 putback_movable_folio(folio); in putback_movable_pages()
169 folio_clear_isolated(folio); in putback_movable_pages()
170 folio_unlock(folio); in putback_movable_pages()
171 folio_put(folio); in putback_movable_pages()
173 node_stat_mod_folio(folio, NR_ISOLATED_ANON + in putback_movable_pages()
174 folio_is_file_lru(folio), -folio_nr_pages(folio)); in putback_movable_pages()
175 folio_putback_lru(folio); in putback_movable_pages()
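
The putback_movable_pages() matches (source lines 149-175) show the putback loop handling three cases per folio: hugetlb folios go back through folio_putback_active_hugetlb(), driver-movable folios through putback_movable_folio() under the folio lock, and ordinary LRU folios through folio_putback_lru() after reversing the NR_ISOLATED_* accounting. A sketch simplified from the matched lines:

void putback_movable_pages_sketch(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		if (unlikely(__folio_test_movable(folio))) {
			/* Driver-movable: put it back only if the driver still
			 * considers it movable; otherwise just drop the flag. */
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			/* Ordinary LRU folio: undo the isolation stats and
			 * hand it back to the LRU lists. */
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio),
					-folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}
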
183 static bool remove_migration_pte(struct folio *folio, in remove_migration_pte() argument
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) in remove_migration_pte()
199 new = folio_page(folio, idx); in remove_migration_pte()
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in remove_migration_pte()
205 !folio_test_pmd_mappable(folio), folio); in remove_migration_pte()
211 folio_get(folio); in remove_migration_pte()
220 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) in remove_migration_pte()
227 if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) in remove_migration_pte()
245 if (folio_test_hugetlb(folio)) { in remove_migration_pte()
251 if (folio_test_anon(folio)) in remove_migration_pte()
261 if (folio_test_anon(folio)) in remove_migration_pte()
285 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) in remove_migration_ptes()
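
remove_migration_ptes() (source line 285) is the wrapper that replays remove_migration_pte() over every mapping of the folio via an rmap walk. Roughly, and assuming the usual rmap_walk_control shape (a sketch, not the verbatim source):

void remove_migration_ptes_sketch(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,	/* restores one mapping */
		.arg = src,				/* old folio the entries point at */
	};

	/* Walk every VMA mapping dst and rewrite migration entries back to
	 * real PTEs; "locked" means the caller already holds the rmap locks. */
	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}
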
379 struct folio *folio) in folio_expected_refs() argument
385 refs += folio_nr_pages(folio); in folio_expected_refs()
386 if (folio_test_private(folio)) in folio_expected_refs()
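
folio_expected_refs() (source lines 379-386) encodes the refcount a folio must have for migration to proceed: one baseline reference, plus one per page held by the page cache when there is a mapping, plus one more if the filesystem keeps private data (buffer heads) attached. Reconstructed around the two matched lines:

static int folio_expected_refs_sketch(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;				/* the migration code's own reference */

	if (!mapping)
		return refs;			/* anon without swap cache: just one */

	refs += folio_nr_pages(folio);		/* one per page for the page cache */
	if (folio_test_private(folio))
		refs++;				/* fs-private data (buffer heads) */

	return refs;
}
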
401 struct folio *newfolio, struct folio *folio, int extra_count) in folio_migrate_mapping() argument
403 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in folio_migrate_mapping()
406 int expected_count = folio_expected_refs(mapping, folio) + extra_count; in folio_migrate_mapping()
407 long nr = folio_nr_pages(folio); in folio_migrate_mapping()
411 if (folio_ref_count(folio) != expected_count) in folio_migrate_mapping()
415 newfolio->index = folio->index; in folio_migrate_mapping()
416 newfolio->mapping = folio->mapping; in folio_migrate_mapping()
417 if (folio_test_swapbacked(folio)) in folio_migrate_mapping()
423 oldzone = folio_zone(folio); in folio_migrate_mapping()
427 if (!folio_ref_freeze(folio, expected_count)) { in folio_migrate_mapping()
436 newfolio->index = folio->index; in folio_migrate_mapping()
437 newfolio->mapping = folio->mapping; in folio_migrate_mapping()
439 if (folio_test_swapbacked(folio)) { in folio_migrate_mapping()
441 if (folio_test_swapcache(folio)) { in folio_migrate_mapping()
443 newfolio->private = folio_get_private(folio); in folio_migrate_mapping()
446 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); in folio_migrate_mapping()
450 dirty = folio_test_dirty(folio); in folio_migrate_mapping()
452 folio_clear_dirty(folio); in folio_migrate_mapping()
463 folio_ref_unfreeze(folio, expected_count - nr); in folio_migrate_mapping()
482 memcg = folio_memcg(folio); in folio_migrate_mapping()
488 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { in folio_migrate_mapping()
492 if (folio_test_pmd_mappable(folio)) { in folio_migrate_mapping()
498 if (folio_test_swapcache(folio)) { in folio_migrate_mapping()
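
The folio_migrate_mapping() matches (source lines 401-498) outline how the old folio is swapped for the new one in the page cache: verify and freeze the reference count at the expected value, mirror index/mapping/swap state and dirtiness onto the new folio, store the new folio in the XArray slot, unfreeze the old folio minus the cache references, then rebalance the per-zone counters. A condensed sketch of that sequence; the statistics and memcg bookkeeping at lines 482-498 are only noted in a comment, not spelled out:

int folio_migrate_mapping_sketch(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);
	int dirty;

	if (!mapping) {
		/* No page cache involvement: the refcount check is everything. */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);
		return MIGRATEPAGE_SUCCESS;
	}

	xas_lock_irq(&xas);
	/* Freeze the refcount so no one can take a new reference meanwhile. */
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/* Mirror identity and swap state onto the new folio. */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr);		/* the cache's references */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Dirtiness moves with the data so writeback targets the new folio. */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);		/* replace the cache entry */

	/* Old folio gives up the cache references it used to hold. */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Lines 482-498: NR_FILE_PAGES/NR_SHMEM/dirty and THP counters are
	 * shifted from the old zone to the new one here before interrupts
	 * are re-enabled. */
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
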
521 struct folio *dst, struct folio *src) in migrate_huge_page_move_mapping()
550 void folio_migrate_flags(struct folio *newfolio, struct folio *folio) in folio_migrate_flags() argument
554 if (folio_test_error(folio)) in folio_migrate_flags()
556 if (folio_test_referenced(folio)) in folio_migrate_flags()
558 if (folio_test_uptodate(folio)) in folio_migrate_flags()
560 if (folio_test_clear_active(folio)) { in folio_migrate_flags()
561 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); in folio_migrate_flags()
563 } else if (folio_test_clear_unevictable(folio)) in folio_migrate_flags()
565 if (folio_test_workingset(folio)) in folio_migrate_flags()
567 if (folio_test_checked(folio)) in folio_migrate_flags()
575 if (folio_test_mappedtodisk(folio)) in folio_migrate_flags()
579 if (folio_test_dirty(folio)) in folio_migrate_flags()
582 if (folio_test_young(folio)) in folio_migrate_flags()
584 if (folio_test_idle(folio)) in folio_migrate_flags()
591 cpupid = page_cpupid_xchg_last(&folio->page, -1); in folio_migrate_flags()
598 bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); in folio_migrate_flags()
606 folio_migrate_ksm(newfolio, folio); in folio_migrate_flags()
611 if (folio_test_swapcache(folio)) in folio_migrate_flags()
612 folio_clear_swapcache(folio); in folio_migrate_flags()
613 folio_clear_private(folio); in folio_migrate_flags()
616 if (!folio_test_hugetlb(folio)) in folio_migrate_flags()
617 folio->private = NULL; in folio_migrate_flags()
631 if (folio_test_readahead(folio)) in folio_migrate_flags()
634 folio_copy_owner(newfolio, folio); in folio_migrate_flags()
636 if (!folio_test_hugetlb(folio)) in folio_migrate_flags()
637 mem_cgroup_migrate(folio, newfolio); in folio_migrate_flags()
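
folio_migrate_flags() (source lines 550-637) copies per-folio state onto the destination one flag at a time and scrubs state that must not remain on the source (the swapcache bit, private data). A shortened sketch covering the most telling matched lines; several flags handled the same way (mappedtodisk, young, idle) and the memory-tiering cpupid reset are collapsed into comments:

void folio_migrate_flags_sketch(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);
	/* ...mappedtodisk, young and idle are carried over the same way... */

	/* Last CPU|PID info follows the data; lines 591-598 also reset it when
	 * the folio moves between memory tiers. */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);

	/* The old folio stops being a swap cache / private-data holder. */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);
	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
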
641 void folio_migrate_copy(struct folio *newfolio, struct folio *folio) in folio_migrate_copy() argument
643 folio_copy(newfolio, folio); in folio_migrate_copy()
644 folio_migrate_flags(newfolio, folio); in folio_migrate_copy()
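
folio_migrate_copy() (source lines 641-644) is the whole-hog variant: copy the data, then the flags. As the two matched lines suggest, the function is essentially just:

void folio_migrate_copy_sketch(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);		/* copy every subpage's contents */
	folio_migrate_flags(newfolio, folio);	/* then transfer the metadata */
}
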
652 int migrate_folio_extra(struct address_space *mapping, struct folio *dst, in migrate_folio_extra()
653 struct folio *src, enum migrate_mode mode, int extra_count) in migrate_folio_extra()
683 int migrate_folio(struct address_space *mapping, struct folio *dst, in migrate_folio()
684 struct folio *src, enum migrate_mode mode) in migrate_folio()
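
migrate_folio() (source lines 683-684) is the common address_space op for filesystems whose folios carry no private state beyond the page cache; it defers to migrate_folio_extra() (lines 652-653) with an extra_count of zero. A sketch consistent with the matched signatures:

int migrate_folio_sketch(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	/* No fs-private references beyond the page cache ones. */
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
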
725 struct folio *dst, struct folio *src, enum migrate_mode mode, in __buffer_migrate_folio()
817 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio()
838 struct folio *dst, struct folio *src, enum migrate_mode mode) in buffer_migrate_folio_norefs()
846 struct folio *dst, struct folio *src, enum migrate_mode mode) in filemap_migrate_folio()
868 static int writeout(struct address_space *mapping, struct folio *folio) in writeout() argument
883 if (!folio_clear_dirty_for_io(folio)) in writeout()
895 remove_migration_ptes(folio, folio, false); in writeout()
897 rc = mapping->a_ops->writepage(&folio->page, &wbc); in writeout()
901 folio_lock(folio); in writeout()
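
writeout() (source lines 868-901) is the fallback for dirty folios that cannot be migrated while dirty: restore the migration PTEs (migration will not succeed on this pass), push the folio through ->writepage(), and re-take the folio lock if writepage dropped it. A sketch of the matched flow; the writeback_control initialisation is abbreviated:

static int writeout_sketch(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};
	int rc;

	if (!mapping->a_ops->writepage)
		return -EINVAL;		/* nowhere to write the data */

	if (!folio_clear_dirty_for_io(folio))
		return -EAGAIN;		/* someone else is already writing it */

	/* Migration of a dirty folio is abandoned on this pass, so map it
	 * back in before handing it to the filesystem. */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		folio_lock(folio);	/* writepage unlocked it; relock */

	return (rc < 0) ? -EIO : -EAGAIN;
}
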
910 struct folio *dst, struct folio *src, enum migrate_mode mode) in fallback_migrate_folio()
945 static int move_to_new_folio(struct folio *dst, struct folio *src, in move_to_new_folio()
1031 static void __migrate_folio_record(struct folio *dst, in __migrate_folio_record()
1040 static void __migrate_folio_extract(struct folio *dst, in __migrate_folio_extract()
1052 static void migrate_folio_undo_src(struct folio *src, in migrate_folio_undo_src()
1070 static void migrate_folio_undo_dst(struct folio *dst, bool locked, in migrate_folio_undo_dst()
1082 static void migrate_folio_done(struct folio *src, in migrate_folio_done()
1102 struct folio *src, struct folio **dstp, enum migrate_mode mode, in migrate_folio_unmap()
1105 struct folio *dst; in migrate_folio_unmap()
1260 struct folio *src, struct folio *dst, in migrate_folio_move()
1356 struct folio *src, int force, enum migrate_mode mode, in unmap_and_move_huge_page()
1359 struct folio *dst; in unmap_and_move_huge_page()
1468 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios) in try_split_folio() argument
1472 folio_lock(folio); in try_split_folio()
1473 rc = split_folio_to_list(folio, split_folios); in try_split_folio()
1474 folio_unlock(folio); in try_split_folio()
1476 list_move_tail(&folio->lru, split_folios); in try_split_folio()
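
try_split_folio() (source lines 1468-1476) wraps split_folio_to_list() with the folio lock and, on success, queues the now-small folio on the caller's split list. Reconstructed from the matched lines:

static inline int try_split_folio_sketch(struct folio *folio,
		struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);	/* 0 on success */
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}
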
1518 struct folio *folio, *folio2; in migrate_hugetlbs() local
1525 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_hugetlbs()
1526 if (!folio_test_hugetlb(folio)) in migrate_hugetlbs()
1529 nr_pages = folio_nr_pages(folio); in migrate_hugetlbs()
1540 if (!hugepage_migration_supported(folio_hstate(folio))) { in migrate_hugetlbs()
1543 list_move_tail(&folio->lru, ret_folios); in migrate_hugetlbs()
1549 folio, pass > 2, mode, in migrate_hugetlbs()
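
migrate_hugetlbs() (source lines 1518-1549) walks the list and migrates hugetlb folios one by one, outside the batching that ordinary folios get; hstates without migration support are bounced straight to the return list. A loop fragment in the shape the matched lines suggest; get_new_page and put_new_page stand in for the caller-supplied allocation callbacks and the retry/statistics bookkeeping is omitted:

	list_for_each_entry_safe(folio, folio2, from, lru) {
		if (!folio_test_hugetlb(folio))
			continue;

		nr_pages = folio_nr_pages(folio);

		if (!hugepage_migration_supported(folio_hstate(folio))) {
			list_move_tail(&folio->lru, ret_folios);
			continue;
		}

		/* "pass > 2" makes the later retry passes try harder. */
		rc = unmap_and_move_huge_page(get_new_page, put_new_page, private,
					      folio, pass > 2, mode,
					      reason, ret_folios);
	}
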
1618 struct folio *folio, *folio2, *dst = NULL, *dst2; in migrate_pages_batch() local
1632 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages_batch()
1633 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); in migrate_pages_batch()
1634 nr_pages = folio_nr_pages(folio); in migrate_pages_batch()
1651 if (!try_split_folio(folio, split_folios)) { in migrate_pages_batch()
1656 list_move_tail(&folio->lru, ret_folios); in migrate_pages_batch()
1661 private, folio, &dst, mode, reason, in migrate_pages_batch()
1681 if (folio_test_large(folio) && !nosplit) { in migrate_pages_batch()
1682 int ret = try_split_folio(folio, split_folios); in migrate_pages_batch()
1721 list_move_tail(&folio->lru, &unmap_folios); in migrate_pages_batch()
1751 dst = list_first_entry(&dst_folios, struct folio, lru); in migrate_pages_batch()
1753 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { in migrate_pages_batch()
1754 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); in migrate_pages_batch()
1755 nr_pages = folio_nr_pages(folio); in migrate_pages_batch()
1760 folio, dst, mode, in migrate_pages_batch()
1795 dst = list_first_entry(&dst_folios, struct folio, lru); in migrate_pages_batch()
1797 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { in migrate_pages_batch()
1802 migrate_folio_undo_src(folio, page_was_mapped, anon_vma, in migrate_pages_batch()
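
The migrate_pages_batch() matches (source lines 1618-1802) show the batched, two-phase structure: pass one unmaps each source folio (splitting large folios when allocation fails and splitting is allowed) and collects the unmapped sources with their freshly allocated destinations on two parallel lists; pass two then moves each unmapped folio to its paired destination; the failure path at lines 1795-1802 walks the leftover pairs and undoes the unmap. A fragment of the second-phase pairing, in the shape the lines at 1751-1760 outline; put_new_folio, private, reason and ret_folios are illustrative stand-ins for the real argument list:

	/* Destinations were queued on dst_folios in the same order that the
	 * sources were queued on unmap_folios, so walk the lists in lock step. */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
		nr_pages = folio_nr_pages(folio);

		/* Move one already-unmapped folio to its paired destination. */
		rc = migrate_folio_move(put_new_folio, private,
					folio, dst, mode,
					reason, ret_folios);

		/* Advance to the next (src, dst) pair; failures are sorted
		 * onto the return/undo lists by the real loop. */
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}
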
1891 struct folio *folio, *folio2; in migrate_pages() local
1908 list_for_each_entry_safe(folio, folio2, from, lru) { in migrate_pages()
1910 if (folio_test_hugetlb(folio)) { in migrate_pages()
1911 list_move_tail(&folio->lru, &ret_folios); in migrate_pages()
1915 nr_pages += folio_nr_pages(folio); in migrate_pages()
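
migrate_pages() itself (source lines 1891-1915) first siphons hugetlb folios off to migrate_hugetlbs(), then carves the remaining list into batches whose total page count stays under a cap before handing each batch to the batched migration path. The matched lines are that batch-sizing loop; a fragment sketch of it, where NR_MAX_BATCHED_MIGRATION is the kernel's batch cap and ret_folios collects folios that will not be migrated by this call:

	/* Hugetlb folios were handled separately above; any still on the
	 * list are pushed to the return list while the next batch is sized. */
	nr_pages = 0;
	list_for_each_entry_safe(folio, folio2, from, lru) {
		if (folio_test_hugetlb(folio)) {
			list_move_tail(&folio->lru, &ret_folios);
			continue;
		}

		nr_pages += folio_nr_pages(folio);
		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
			break;		/* enough for one batch */
	}
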
1981 struct folio *alloc_migration_target(struct folio *src, unsigned long private) in alloc_migration_target()
2477 static struct folio *alloc_misplaced_dst_folio(struct folio *src, in alloc_misplaced_dst_folio()