Lines matching refs: folio — cross-reference listing of mm/shmem.c (the leading number on each hit is the shmem.c line number; "local"/"argument" mark how the identifier is used there). Hedged sketches of the patterns each group of hits traces are interleaved below.
140 struct folio **foliop, enum sgp_type sgp,
549 struct folio *folio; in shmem_unused_huge_shrink() local
602 folio = filemap_get_folio(inode->i_mapping, index); in shmem_unused_huge_shrink()
603 if (!folio) in shmem_unused_huge_shrink()
607 if (!folio_test_large(folio)) { in shmem_unused_huge_shrink()
608 folio_put(folio); in shmem_unused_huge_shrink()
619 if (!folio_trylock(folio)) { in shmem_unused_huge_shrink()
620 folio_put(folio); in shmem_unused_huge_shrink()
624 ret = split_folio(folio); in shmem_unused_huge_shrink()
625 folio_unlock(folio); in shmem_unused_huge_shrink()
626 folio_put(folio); in shmem_unused_huge_shrink()
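
Pulled together, the shrinker's split pattern above reads as the minimal sketch below. try_split_at() is a hypothetical name, and it assumes the filemap_get_folio() of this era, which returns NULL on a cache miss rather than an ERR_PTR. The trylock is what lets the shrinker walk its list without ever sleeping on a folio lock:

        static int try_split_at(struct address_space *mapping, pgoff_t index)
        {
                struct folio *folio;
                int ret;

                folio = filemap_get_folio(mapping, index);      /* takes a reference */
                if (!folio)
                        return -ENOENT;
                if (!folio_test_large(folio)) {                 /* nothing to split */
                        folio_put(folio);
                        return 0;
                }
                if (!folio_trylock(folio)) {                    /* never block the shrinker */
                        folio_put(folio);
                        return -EAGAIN;
                }
                ret = split_folio(folio);                       /* 0 on success */
                folio_unlock(folio);
                folio_put(folio);
                return ret;
        }
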
691 static int shmem_add_to_page_cache(struct folio *folio, in shmem_add_to_page_cache() argument
696 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in shmem_add_to_page_cache()
697 long nr = folio_nr_pages(folio); in shmem_add_to_page_cache()
700 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio); in shmem_add_to_page_cache()
701 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in shmem_add_to_page_cache()
702 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); in shmem_add_to_page_cache()
703 VM_BUG_ON(expected && folio_test_large(folio)); in shmem_add_to_page_cache()
705 folio_ref_add(folio, nr); in shmem_add_to_page_cache()
706 folio->mapping = mapping; in shmem_add_to_page_cache()
707 folio->index = index; in shmem_add_to_page_cache()
709 if (!folio_test_swapcache(folio)) { in shmem_add_to_page_cache()
710 error = mem_cgroup_charge(folio, charge_mm, gfp); in shmem_add_to_page_cache()
712 if (folio_test_pmd_mappable(folio)) { in shmem_add_to_page_cache()
719 folio_throttle_swaprate(folio, gfp); in shmem_add_to_page_cache()
731 xas_store(&xas, folio); in shmem_add_to_page_cache()
734 if (folio_test_pmd_mappable(folio)) { in shmem_add_to_page_cache()
736 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr); in shmem_add_to_page_cache()
739 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); in shmem_add_to_page_cache()
740 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr); in shmem_add_to_page_cache()
752 folio->mapping = NULL; in shmem_add_to_page_cache()
753 folio_ref_sub(folio, nr); in shmem_add_to_page_cache()
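
The invariants those lines establish, condensed into a hedged sketch (add_locked_folio() is hypothetical; memcg charging, THP statistics and the conflict checks are elided): the folio arrives locked and swap-backed, gains one reference per page before being published as a single multi-order xarray entry, and the failure path unwinds exactly that setup:

        static int add_locked_folio(struct folio *folio,
                                    struct address_space *mapping,
                                    pgoff_t index, gfp_t gfp)
        {
                XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
                long nr = folio_nr_pages(folio);

                VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
                VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

                folio_ref_add(folio, nr);               /* one reference per page */
                folio->mapping = mapping;
                folio->index = index;

                do {
                        xas_lock_irq(&xas);
                        xas_store(&xas, folio);         /* one multi-order entry */
                        if (!xas_error(&xas)) {
                                mapping->nrpages += nr;
                                __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                                __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
                        }
                        xas_unlock_irq(&xas);
                } while (xas_nomem(&xas, gfp));         /* retry after node alloc */

                if (xas_error(&xas)) {                  /* unwind the setup above */
                        folio->mapping = NULL;
                        folio_ref_sub(folio, nr);
                        return xas_error(&xas);
                }
                return 0;
        }
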
760 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) in shmem_delete_from_page_cache() argument
762 struct address_space *mapping = folio->mapping; in shmem_delete_from_page_cache()
763 long nr = folio_nr_pages(folio); in shmem_delete_from_page_cache()
767 error = shmem_replace_entry(mapping, folio->index, folio, radswap); in shmem_delete_from_page_cache()
768 folio->mapping = NULL; in shmem_delete_from_page_cache()
770 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in shmem_delete_from_page_cache()
771 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in shmem_delete_from_page_cache()
773 folio_put(folio); in shmem_delete_from_page_cache()
877 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index) in shmem_get_partial_folio()
879 struct folio *folio; in shmem_get_partial_folio() local
885 folio = __filemap_get_folio(inode->i_mapping, index, in shmem_get_partial_folio()
887 if (!xa_is_value(folio)) in shmem_get_partial_folio()
888 return folio; in shmem_get_partial_folio()
893 folio = NULL; in shmem_get_partial_folio()
894 shmem_get_folio(inode, index, &folio, SGP_READ); in shmem_get_partial_folio()
895 return folio; in shmem_get_partial_folio()
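
Reconstructed as one piece (a hedged reading of the hits above, filling in only what the listing makes unambiguous): FGP_ENTRY makes __filemap_get_folio() hand back shadow/swap value entries instead of filtering them, so a swapped-out page shows up as an xa_is_value() entry and is read back in via SGP_READ:

        /* hedged reconstruction of shmem_get_partial_folio() from the hits above */
        static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
        {
                struct folio *folio;

                /* FGP_ENTRY: return swap/shadow value entries rather than NULL */
                folio = __filemap_get_folio(inode->i_mapping, index,
                                            FGP_ENTRY | FGP_LOCK, 0);
                if (!xa_is_value(folio))
                        return folio;   /* NULL, or a locked folio with a ref */

                /* a swap entry: bring the folio back in before truncating it */
                folio = NULL;
                shmem_get_folio(inode, index, &folio, SGP_READ);
                return folio;
        }
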
911 struct folio *folio; in shmem_undo_range() local
928 folio = fbatch.folios[i]; in shmem_undo_range()
932 if (xa_is_value(folio)) { in shmem_undo_range()
936 index, folio); in shmem_undo_range()
939 index += folio_nr_pages(folio) - 1; in shmem_undo_range()
941 if (!unfalloc || !folio_test_uptodate(folio)) in shmem_undo_range()
942 truncate_inode_folio(mapping, folio); in shmem_undo_range()
943 folio_unlock(folio); in shmem_undo_range()
961 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT); in shmem_undo_range()
962 if (folio) { in shmem_undo_range()
963 same_folio = lend < folio_pos(folio) + folio_size(folio); in shmem_undo_range()
964 folio_mark_dirty(folio); in shmem_undo_range()
965 if (!truncate_inode_partial_folio(folio, lstart, lend)) { in shmem_undo_range()
966 start = folio->index + folio_nr_pages(folio); in shmem_undo_range()
968 end = folio->index; in shmem_undo_range()
970 folio_unlock(folio); in shmem_undo_range()
971 folio_put(folio); in shmem_undo_range()
972 folio = NULL; in shmem_undo_range()
976 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT); in shmem_undo_range()
977 if (folio) { in shmem_undo_range()
978 folio_mark_dirty(folio); in shmem_undo_range()
979 if (!truncate_inode_partial_folio(folio, lstart, lend)) in shmem_undo_range()
980 end = folio->index; in shmem_undo_range()
981 folio_unlock(folio); in shmem_undo_range()
982 folio_put(folio); in shmem_undo_range()
1001 folio = fbatch.folios[i]; in shmem_undo_range()
1004 if (xa_is_value(folio)) { in shmem_undo_range()
1007 if (shmem_free_swap(mapping, index, folio)) { in shmem_undo_range()
1016 folio_lock(folio); in shmem_undo_range()
1018 if (!unfalloc || !folio_test_uptodate(folio)) { in shmem_undo_range()
1019 if (folio_mapping(folio) != mapping) { in shmem_undo_range()
1021 folio_unlock(folio); in shmem_undo_range()
1025 VM_BUG_ON_FOLIO(folio_test_writeback(folio), in shmem_undo_range()
1026 folio); in shmem_undo_range()
1027 truncate_inode_folio(mapping, folio); in shmem_undo_range()
1029 index = folio->index + folio_nr_pages(folio) - 1; in shmem_undo_range()
1030 folio_unlock(folio); in shmem_undo_range()
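
The edge handling in the middle of that walk (lines 961-982) is worth spelling out; a hedged sketch with a hypothetical trim_edges() helper. Folios straddling lstart or lend are dirtied and partially zeroed rather than removed, and when a large edge folio refuses to split, the whole-folio pass is narrowed to step around it:

        static void trim_edges(struct inode *inode, loff_t lstart, loff_t lend,
                               pgoff_t *start, pgoff_t *end)
        {
                struct folio *folio;
                bool same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);

                folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
                if (folio) {
                        same_folio = lend < folio_pos(folio) + folio_size(folio);
                        folio_mark_dirty(folio);
                        if (!truncate_inode_partial_folio(folio, lstart, lend)) {
                                /* unsplittable: keep it out of the main pass */
                                *start = folio->index + folio_nr_pages(folio);
                                if (same_folio)
                                        *end = folio->index;
                        }
                        folio_unlock(folio);
                        folio_put(folio);
                }

                if (!same_folio) {
                        folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
                        if (folio) {
                                folio_mark_dirty(folio);
                                if (!truncate_inode_partial_folio(folio, lstart, lend))
                                        *end = folio->index;
                                folio_unlock(folio);
                                folio_put(folio);
                        }
                }
        }
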
1186 struct folio *folio; in shmem_find_swap_entries() local
1190 xas_for_each(&xas, folio, ULONG_MAX) { in shmem_find_swap_entries()
1191 if (xas_retry(&xas, folio)) in shmem_find_swap_entries()
1194 if (!xa_is_value(folio)) in shmem_find_swap_entries()
1197 entry = radix_to_swp_entry(folio); in shmem_find_swap_entries()
1206 if (!folio_batch_add(fbatch, folio)) in shmem_find_swap_entries()
1232 struct folio *folio = fbatch->folios[i]; in shmem_unuse_swap_entries() local
1234 if (!xa_is_value(folio)) in shmem_unuse_swap_entries()
1237 &folio, SGP_CACHE, in shmem_unuse_swap_entries()
1241 folio_unlock(folio); in shmem_unuse_swap_entries()
1242 folio_put(folio); in shmem_unuse_swap_entries()
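
The walk feeding those batches distinguishes swapped-out slots from resident folios purely by entry type. A simplified, hedged sketch (find_swap_entries() is a hypothetical name; the real function also records the indices and pauses the walk to reschedule):

        static unsigned int find_swap_entries(struct address_space *mapping,
                                              pgoff_t start,
                                              struct folio_batch *fbatch,
                                              unsigned int target_type)
        {
                XA_STATE(xas, &mapping->i_pages, start);
                struct folio *folio;
                swp_entry_t entry;

                rcu_read_lock();
                xas_for_each(&xas, folio, ULONG_MAX) {
                        if (xas_retry(&xas, folio))
                                continue;
                        if (!xa_is_value(folio))        /* resident, not swapped */
                                continue;
                        entry = radix_to_swp_entry(folio);
                        if (swp_type(entry) != target_type)
                                continue;
                        if (!folio_batch_add(fbatch, folio))
                                break;                  /* batch full */
                }
                rcu_read_unlock();
                return folio_batch_count(fbatch);
        }
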
1331 struct folio *folio = page_folio(page); in shmem_writepage() local
1343 if (folio_test_large(folio)) { in shmem_writepage()
1345 folio_test_set_dirty(folio); in shmem_writepage()
1348 folio = page_folio(page); in shmem_writepage()
1349 folio_clear_dirty(folio); in shmem_writepage()
1352 BUG_ON(!folio_test_locked(folio)); in shmem_writepage()
1353 mapping = folio->mapping; in shmem_writepage()
1354 index = folio->index; in shmem_writepage()
1385 if (!folio_test_uptodate(folio)) { in shmem_writepage()
1401 folio_zero_range(folio, 0, folio_size(folio)); in shmem_writepage()
1402 flush_dcache_folio(folio); in shmem_writepage()
1403 folio_mark_uptodate(folio); in shmem_writepage()
1406 swap = folio_alloc_swap(folio); in shmem_writepage()
1422 if (add_to_swap_cache(folio, swap, in shmem_writepage()
1431 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); in shmem_writepage()
1434 BUG_ON(folio_mapped(folio)); in shmem_writepage()
1435 swap_writepage(&folio->page, wbc); in shmem_writepage()
1440 put_swap_folio(folio, swap); in shmem_writepage()
1442 folio_mark_dirty(folio); in shmem_writepage()
1445 folio_unlock(folio); in shmem_writepage()
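
The handoff those lines perform, condensed (writeout_to_swap() is hypothetical; the swaplist bookkeeping and the fallocated-hole zeroing at lines 1385-1403 are elided): the folio moves from the page cache into the swap cache, leaving a swap radix entry behind, before any I/O is issued; on failure the slot is released and the folio simply redirtied:

        static int writeout_to_swap(struct folio *folio,
                                    struct writeback_control *wbc)
        {
                swp_entry_t swap = folio_alloc_swap(folio);

                if (!swap.val)
                        goto redirty;

                if (add_to_swap_cache(folio, swap,
                                __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                NULL) == 0) {
                        /* leave a swap entry where the folio used to be */
                        shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
                        BUG_ON(folio_mapped(folio));
                        swap_writepage(&folio->page, wbc);      /* unlocks the folio */
                        return 0;
                }

                put_swap_folio(folio, swap);            /* free the unused slot */
        redirty:
                folio_mark_dirty(folio);
                folio_unlock(folio);
                return 0;
        }
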
1502 static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp, in shmem_swapin()
1544 static struct folio *shmem_alloc_hugefolio(gfp_t gfp, in shmem_alloc_hugefolio()
1550 struct folio *folio; in shmem_alloc_hugefolio() local
1558 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); in shmem_alloc_hugefolio()
1560 if (!folio) in shmem_alloc_hugefolio()
1562 return folio; in shmem_alloc_hugefolio()
1565 static struct folio *shmem_alloc_folio(gfp_t gfp, in shmem_alloc_folio()
1569 struct folio *folio; in shmem_alloc_folio() local
1572 folio = vma_alloc_folio(gfp, 0, &pvma, 0, false); in shmem_alloc_folio()
1575 return folio; in shmem_alloc_folio()
1578 static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, in shmem_alloc_and_acct_folio()
1582 struct folio *folio; in shmem_alloc_and_acct_folio() local
1594 folio = shmem_alloc_hugefolio(gfp, info, index); in shmem_alloc_and_acct_folio()
1596 folio = shmem_alloc_folio(gfp, info, index); in shmem_alloc_and_acct_folio()
1597 if (folio) { in shmem_alloc_and_acct_folio()
1598 __folio_set_locked(folio); in shmem_alloc_and_acct_folio()
1599 __folio_set_swapbacked(folio); in shmem_alloc_and_acct_folio()
1600 return folio; in shmem_alloc_and_acct_folio()
1621 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) in shmem_should_replace_folio() argument
1623 return folio_zonenum(folio) > gfp_zone(gfp); in shmem_should_replace_folio()
1626 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, in shmem_replace_folio()
1629 struct folio *old, *new; in shmem_replace_folio()
1696 struct folio *folio, swp_entry_t swap) in shmem_set_folio_swapin_error() argument
1703 swapin_error = make_swapin_error_entry(&folio->page); in shmem_set_folio_swapin_error()
1710 folio_wait_writeback(folio); in shmem_set_folio_swapin_error()
1711 delete_from_swap_cache(folio); in shmem_set_folio_swapin_error()
1732 struct folio **foliop, enum sgp_type sgp, in shmem_swapin_folio()
1739 struct folio *folio = NULL; in shmem_swapin_folio() local
1751 folio = swap_cache_get_folio(swap, NULL, 0); in shmem_swapin_folio()
1752 if (!folio) { in shmem_swapin_folio()
1760 folio = shmem_swapin(swap, gfp, info, index); in shmem_swapin_folio()
1761 if (!folio) { in shmem_swapin_folio()
1768 folio_lock(folio); in shmem_swapin_folio()
1769 if (!folio_test_swapcache(folio) || in shmem_swapin_folio()
1770 folio_swap_entry(folio).val != swap.val || in shmem_swapin_folio()
1775 if (!folio_test_uptodate(folio)) { in shmem_swapin_folio()
1779 folio_wait_writeback(folio); in shmem_swapin_folio()
1785 arch_swap_restore(swap, folio); in shmem_swapin_folio()
1787 if (shmem_should_replace_folio(folio, gfp)) { in shmem_swapin_folio()
1788 error = shmem_replace_folio(&folio, gfp, info, index); in shmem_swapin_folio()
1793 error = shmem_add_to_page_cache(folio, mapping, index, in shmem_swapin_folio()
1805 folio_mark_accessed(folio); in shmem_swapin_folio()
1807 delete_from_swap_cache(folio); in shmem_swapin_folio()
1808 folio_mark_dirty(folio); in shmem_swapin_folio()
1811 *foliop = folio; in shmem_swapin_folio()
1817 shmem_set_folio_swapin_error(inode, index, folio, swap); in shmem_swapin_folio()
1819 if (folio) { in shmem_swapin_folio()
1820 folio_unlock(folio); in shmem_swapin_folio()
1821 folio_put(folio); in shmem_swapin_folio()
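
Between the swap-cache lookup at line 1751 and the folio_lock() at line 1768, the folio can be reclaimed or recycled; that is what the checks at lines 1769-1770 guard against (the full path also reconfirms the shmem mapping's own entry). As a hedged predicate with a hypothetical name:

        static bool swapin_folio_still_valid(struct folio *folio, swp_entry_t swap)
        {
                if (!folio_test_swapcache(folio))
                        return false;   /* reclaimed and reused meanwhile */
                if (folio_swap_entry(folio).val != swap.val)
                        return false;   /* now caches a different swap entry */
                return true;
        }
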
1838 struct folio **foliop, enum sgp_type sgp, gfp_t gfp, in shmem_get_folio_gfp()
1846 struct folio *folio; in shmem_get_folio_gfp() local
1864 folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0); in shmem_get_folio_gfp()
1865 if (folio && vma && userfaultfd_minor(vma)) { in shmem_get_folio_gfp()
1866 if (!xa_is_value(folio)) { in shmem_get_folio_gfp()
1867 folio_unlock(folio); in shmem_get_folio_gfp()
1868 folio_put(folio); in shmem_get_folio_gfp()
1874 if (xa_is_value(folio)) { in shmem_get_folio_gfp()
1875 error = shmem_swapin_folio(inode, index, &folio, in shmem_get_folio_gfp()
1880 *foliop = folio; in shmem_get_folio_gfp()
1884 if (folio) { in shmem_get_folio_gfp()
1885 hindex = folio->index; in shmem_get_folio_gfp()
1887 folio_mark_accessed(folio); in shmem_get_folio_gfp()
1888 if (folio_test_uptodate(folio)) in shmem_get_folio_gfp()
1893 folio_unlock(folio); in shmem_get_folio_gfp()
1894 folio_put(folio); in shmem_get_folio_gfp()
1921 folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); in shmem_get_folio_gfp()
1922 if (IS_ERR(folio)) { in shmem_get_folio_gfp()
1924 folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); in shmem_get_folio_gfp()
1926 if (IS_ERR(folio)) { in shmem_get_folio_gfp()
1929 error = PTR_ERR(folio); in shmem_get_folio_gfp()
1930 folio = NULL; in shmem_get_folio_gfp()
1949 hindex = round_down(index, folio_nr_pages(folio)); in shmem_get_folio_gfp()
1952 __folio_set_referenced(folio); in shmem_get_folio_gfp()
1954 error = shmem_add_to_page_cache(folio, mapping, hindex, in shmem_get_folio_gfp()
1959 folio_add_lru(folio); in shmem_get_folio_gfp()
1962 info->alloced += folio_nr_pages(folio); in shmem_get_folio_gfp()
1963 inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio); in shmem_get_folio_gfp()
1968 if (folio_test_pmd_mappable(folio) && in shmem_get_folio_gfp()
1970 folio_next_index(folio) - 1) { in shmem_get_folio_gfp()
1999 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { in shmem_get_folio_gfp()
2000 long i, n = folio_nr_pages(folio); in shmem_get_folio_gfp()
2003 clear_highpage(folio_page(folio, i)); in shmem_get_folio_gfp()
2004 flush_dcache_folio(folio); in shmem_get_folio_gfp()
2005 folio_mark_uptodate(folio); in shmem_get_folio_gfp()
2012 folio_clear_dirty(folio); in shmem_get_folio_gfp()
2013 filemap_remove_folio(folio); in shmem_get_folio_gfp()
2022 *foliop = folio; in shmem_get_folio_gfp()
2029 shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); in shmem_get_folio_gfp()
2031 if (folio_test_large(folio)) { in shmem_get_folio_gfp()
2032 folio_unlock(folio); in shmem_get_folio_gfp()
2033 folio_put(folio); in shmem_get_folio_gfp()
2037 if (folio) { in shmem_get_folio_gfp()
2038 folio_unlock(folio); in shmem_get_folio_gfp()
2039 folio_put(folio); in shmem_get_folio_gfp()
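
The allocation dance at lines 1921-1930 has a simple shape: try a PMD-sized folio with the THP-limited gfp mask first, then retry with a single page before giving up. A hedged sketch (alloc_with_fallback() is hypothetical):

        static struct folio *alloc_with_fallback(gfp_t huge_gfp, gfp_t gfp,
                                                 struct inode *inode,
                                                 pgoff_t index, bool want_huge)
        {
                struct folio *folio = ERR_PTR(-EAGAIN);

                if (want_huge)  /* try a PMD-sized folio first */
                        folio = shmem_alloc_and_acct_folio(huge_gfp, inode,
                                                           index, true);
                if (IS_ERR(folio))      /* fall back to a single page */
                        folio = shmem_alloc_and_acct_folio(gfp, inode,
                                                           index, false);
                /* locked and swap-backed on success, ERR_PTR on failure */
                return folio;
        }
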
2052 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, in shmem_get_folio()
2076 struct folio *folio = NULL; in shmem_fault() local
2139 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, in shmem_fault()
2143 if (folio) in shmem_fault()
2144 vmf->page = folio_file_page(folio, vmf->pgoff); in shmem_fault()
2413 struct folio *folio; in shmem_mfill_atomic_pte() local
2432 folio = shmem_alloc_folio(gfp, info, pgoff); in shmem_mfill_atomic_pte()
2433 if (!folio) in shmem_mfill_atomic_pte()
2437 page_kaddr = kmap_local_folio(folio, 0); in shmem_mfill_atomic_pte()
2462 *pagep = &folio->page; in shmem_mfill_atomic_pte()
2468 flush_dcache_folio(folio); in shmem_mfill_atomic_pte()
2470 clear_user_highpage(&folio->page, dst_addr); in shmem_mfill_atomic_pte()
2473 folio = page_folio(*pagep); in shmem_mfill_atomic_pte()
2474 VM_BUG_ON_FOLIO(folio_test_large(folio), folio); in shmem_mfill_atomic_pte()
2478 VM_BUG_ON(folio_test_locked(folio)); in shmem_mfill_atomic_pte()
2479 VM_BUG_ON(folio_test_swapbacked(folio)); in shmem_mfill_atomic_pte()
2480 __folio_set_locked(folio); in shmem_mfill_atomic_pte()
2481 __folio_set_swapbacked(folio); in shmem_mfill_atomic_pte()
2482 __folio_mark_uptodate(folio); in shmem_mfill_atomic_pte()
2489 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, in shmem_mfill_atomic_pte()
2495 &folio->page, true, wp_copy); in shmem_mfill_atomic_pte()
2505 folio_unlock(folio); in shmem_mfill_atomic_pte()
2508 filemap_remove_folio(folio); in shmem_mfill_atomic_pte()
2510 folio_unlock(folio); in shmem_mfill_atomic_pte()
2511 folio_put(folio); in shmem_mfill_atomic_pte()
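
The copy at lines 2437-2468 can itself fault while mmap_lock is held, which is what the *pagep stash at line 2462 is for: on a partial copy, -ENOENT tells the caller to drop the lock, fault the source in, and retry with the half-filled page. A hedged sketch of just that dance (copy_user_into_folio() is hypothetical):

        static int copy_user_into_folio(struct folio *folio, struct page **pagep,
                                        const void __user *src)
        {
                void *kaddr = kmap_local_folio(folio, 0);
                size_t left = copy_from_user(kaddr, src, PAGE_SIZE);

                kunmap_local(kaddr);
                if (unlikely(left)) {
                        *pagep = &folio->page;  /* keep the partial copy */
                        return -ENOENT;         /* retry outside mmap_lock */
                }
                flush_dcache_folio(folio);      /* make copied data visible */
                return 0;
        }
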
2530 struct folio *folio; in shmem_write_begin() local
2542 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); in shmem_write_begin()
2547 *pagep = folio_file_page(folio, index); in shmem_write_begin()
2549 folio_unlock(folio); in shmem_write_begin()
2550 folio_put(folio); in shmem_write_begin()
2609 struct folio *folio = NULL; in shmem_file_read_iter() local
2624 error = shmem_get_folio(inode, index, &folio, SGP_READ); in shmem_file_read_iter()
2630 if (folio) { in shmem_file_read_iter()
2631 folio_unlock(folio); in shmem_file_read_iter()
2633 page = folio_file_page(folio, index); in shmem_file_read_iter()
2635 folio_put(folio); in shmem_file_read_iter()
2651 if (folio) in shmem_file_read_iter()
2652 folio_put(folio); in shmem_file_read_iter()
2658 if (folio) { in shmem_file_read_iter()
2670 folio_mark_accessed(folio); in shmem_file_read_iter()
2676 folio_put(folio); in shmem_file_read_iter()
2819 struct folio *folio; in shmem_fallocate() local
2830 error = shmem_get_folio(inode, index, &folio, in shmem_fallocate()
2848 index = folio_next_index(folio); in shmem_fallocate()
2857 if (!folio_test_uptodate(folio)) in shmem_fallocate()
2868 folio_mark_dirty(folio); in shmem_fallocate()
2869 folio_unlock(folio); in shmem_fallocate()
2870 folio_put(folio); in shmem_fallocate()
3130 struct folio *folio; in shmem_symlink() local
3158 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE); in shmem_symlink()
3165 memcpy(folio_address(folio), symname, len); in shmem_symlink()
3166 folio_mark_uptodate(folio); in shmem_symlink()
3167 folio_mark_dirty(folio); in shmem_symlink()
3168 folio_unlock(folio); in shmem_symlink()
3169 folio_put(folio); in shmem_symlink()
3189 struct folio *folio = NULL; in shmem_get_link() local
3193 folio = filemap_get_folio(inode->i_mapping, 0); in shmem_get_link()
3194 if (!folio) in shmem_get_link()
3196 if (PageHWPoison(folio_page(folio, 0)) || in shmem_get_link()
3197 !folio_test_uptodate(folio)) { in shmem_get_link()
3198 folio_put(folio); in shmem_get_link()
3202 error = shmem_get_folio(inode, 0, &folio, SGP_READ); in shmem_get_link()
3205 if (!folio) in shmem_get_link()
3207 if (PageHWPoison(folio_page(folio, 0))) { in shmem_get_link()
3208 folio_unlock(folio); in shmem_get_link()
3209 folio_put(folio); in shmem_get_link()
3212 folio_unlock(folio); in shmem_get_link()
3214 set_delayed_call(done, shmem_put_link, folio); in shmem_get_link()
3215 return folio_address(folio); in shmem_get_link()
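
The dentry-backed branch of those hits, reconstructed as a hedged sketch (get_link_slow() is a hypothetical name): read the symlink folio in, refuse hwpoisoned pages, and hand the folio reference to the delayed_call so the VFS drops it only after the link body has been consumed:

        static const char *get_link_slow(struct inode *inode,
                                         struct delayed_call *done)
        {
                struct folio *folio = NULL;
                int error = shmem_get_folio(inode, 0, &folio, SGP_READ);

                if (error)
                        return ERR_PTR(error);
                if (!folio)
                        return ERR_PTR(-ECHILD);
                if (PageHWPoison(folio_page(folio, 0))) {
                        folio_unlock(folio);
                        folio_put(folio);
                        return ERR_PTR(-ECHILD);
                }
                folio_unlock(folio);
                /* reference dropped by shmem_put_link() via the delayed call */
                set_delayed_call(done, shmem_put_link, folio);
                return folio_address(folio);
        }
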
4314 struct folio *folio; in shmem_read_mapping_page_gfp() local
4319 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE, in shmem_read_mapping_page_gfp()
4324 folio_unlock(folio); in shmem_read_mapping_page_gfp()
4325 page = folio_file_page(folio, index); in shmem_read_mapping_page_gfp()
4327 folio_put(folio); in shmem_read_mapping_page_gfp()
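
Finally, the tail of shmem_read_mapping_page_gfp() shows how page-based callers sit on top of the folio lookup. A hedged reconstruction (the NULL trailing arguments to shmem_get_folio_gfp() are an assumption, since the listing truncates that call):

        struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                                 pgoff_t index, gfp_t gfp)
        {
                struct inode *inode = mapping->host;
                struct folio *folio;
                struct page *page;
                int error;

                error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
                                            gfp, NULL, NULL, NULL);
                if (error)
                        return ERR_PTR(error);

                folio_unlock(folio);
                page = folio_file_page(folio, index);   /* page within the folio */
                if (PageHWPoison(page)) {
                        folio_put(folio);               /* the put at line 4327 */
                        return ERR_PTR(-EIO);
                }
                return page;    /* reference still held via the folio */
        }
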