Lines matching refs:folio (identifier cross-reference, mm/filemap.c)
127 struct folio *folio, void *shadow) in page_cache_delete() argument
129 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
135 if (!folio_test_hugetlb(folio)) { in page_cache_delete()
136 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
137 nr = folio_nr_pages(folio); in page_cache_delete()
140 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in page_cache_delete()
145 folio->mapping = NULL; in page_cache_delete()
151 struct folio *folio) in filemap_unaccount_folio() argument
155 VM_BUG_ON_FOLIO(folio_mapped(folio), folio); in filemap_unaccount_folio()
156 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) { in filemap_unaccount_folio()
158 current->comm, folio_pfn(folio)); in filemap_unaccount_folio()
159 dump_page(&folio->page, "still mapped when deleted"); in filemap_unaccount_folio()
163 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
164 int mapcount = page_mapcount(&folio->page); in filemap_unaccount_folio()
166 if (folio_ref_count(folio) >= mapcount + 2) { in filemap_unaccount_folio()
173 page_mapcount_reset(&folio->page); in filemap_unaccount_folio()
174 folio_ref_sub(folio, mapcount); in filemap_unaccount_folio()
180 if (folio_test_hugetlb(folio)) in filemap_unaccount_folio()
183 nr = folio_nr_pages(folio); in filemap_unaccount_folio()
185 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in filemap_unaccount_folio()
186 if (folio_test_swapbacked(folio)) { in filemap_unaccount_folio()
187 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in filemap_unaccount_folio()
188 if (folio_test_pmd_mappable(folio)) in filemap_unaccount_folio()
189 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); in filemap_unaccount_folio()
190 } else if (folio_test_pmd_mappable(folio)) { in filemap_unaccount_folio()
191 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); in filemap_unaccount_folio()
209 if (WARN_ON_ONCE(folio_test_dirty(folio) && in filemap_unaccount_folio()
211 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
219 void __filemap_remove_folio(struct folio *folio, void *shadow) in __filemap_remove_folio() argument
221 struct address_space *mapping = folio->mapping; in __filemap_remove_folio()
223 trace_mm_filemap_delete_from_page_cache(folio); in __filemap_remove_folio()
224 filemap_unaccount_folio(mapping, folio); in __filemap_remove_folio()
225 page_cache_delete(mapping, folio, shadow); in __filemap_remove_folio()
228 void filemap_free_folio(struct address_space *mapping, struct folio *folio) in filemap_free_folio() argument
230 void (*free_folio)(struct folio *); in filemap_free_folio()
235 free_folio(folio); in filemap_free_folio()
237 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) in filemap_free_folio()
238 refs = folio_nr_pages(folio); in filemap_free_folio()
239 folio_put_refs(folio, refs); in filemap_free_folio()
250 void filemap_remove_folio(struct folio *folio) in filemap_remove_folio() argument
252 struct address_space *mapping = folio->mapping; in filemap_remove_folio()
254 BUG_ON(!folio_test_locked(folio)); in filemap_remove_folio()
257 __filemap_remove_folio(folio, NULL); in filemap_remove_folio()
263 filemap_free_folio(mapping, folio); in filemap_remove_folio()
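page_cache_delete(), filemap_unaccount_folio(), __filemap_remove_folio() and filemap_free_folio() above compose into filemap_remove_folio(), the synchronous removal path. A minimal sketch of a caller, assuming the folio was looked up and referenced elsewhere (the helper itself is illustrative, not part of filemap.c):

static void example_drop_from_cache(struct folio *folio)
{
	folio_lock(folio);			/* filemap_remove_folio() BUG_ONs on an unlocked folio */
	if (folio->mapping)			/* may have been truncated under us already */
		filemap_remove_folio(folio);	/* unaccount, delete from the xarray, drop cache refs */
	folio_unlock(folio);
	folio_put(folio);			/* drop the lookup reference */
}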
285 struct folio *folio; in page_cache_delete_batch() local
288 xas_for_each(&xas, folio, ULONG_MAX) { in page_cache_delete_batch()
293 if (xa_is_value(folio)) in page_cache_delete_batch()
302 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
303 VM_BUG_ON_FOLIO(folio->index > in page_cache_delete_batch()
304 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
308 WARN_ON_ONCE(!folio_test_locked(folio)); in page_cache_delete_batch()
310 folio->mapping = NULL; in page_cache_delete_batch()
315 total_pages += folio_nr_pages(folio); in page_cache_delete_batch()
331 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch() local
333 trace_mm_filemap_delete_from_page_cache(folio); in delete_from_page_cache_batch()
334 filemap_unaccount_folio(mapping, folio); in delete_from_page_cache_batch()
475 struct folio *folio; in filemap_range_has_page() local
484 folio = xas_find(&xas, max); in filemap_range_has_page()
485 if (xas_retry(&xas, folio)) in filemap_range_has_page()
488 if (xa_is_value(folio)) in filemap_range_has_page()
499 return folio != NULL; in filemap_range_has_page()
523 struct folio *folio = fbatch.folios[i]; in __filemap_fdatawait_range() local
525 folio_wait_writeback(folio); in __filemap_fdatawait_range()
526 folio_clear_error(folio); in __filemap_fdatawait_range()
636 struct folio *folio; in filemap_range_has_writeback() local
642 xas_for_each(&xas, folio, max) { in filemap_range_has_writeback()
643 if (xas_retry(&xas, folio)) in filemap_range_has_writeback()
645 if (xa_is_value(folio)) in filemap_range_has_writeback()
647 if (folio_test_dirty(folio) || folio_test_locked(folio) || in filemap_range_has_writeback()
648 folio_test_writeback(folio)) in filemap_range_has_writeback()
652 return folio != NULL; in filemap_range_has_writeback()
807 void replace_page_cache_folio(struct folio *old, struct folio *new) in replace_page_cache_folio()
810 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_folio()
845 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) in __filemap_add_folio() argument
848 int huge = folio_test_hugetlb(folio); in __filemap_add_folio()
852 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in __filemap_add_folio()
853 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); in __filemap_add_folio()
857 int error = mem_cgroup_charge(folio, NULL, gfp); in __filemap_add_folio()
858 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); in __filemap_add_folio()
862 xas_set_order(&xas, index, folio_order(folio)); in __filemap_add_folio()
863 nr = folio_nr_pages(folio); in __filemap_add_folio()
867 folio_ref_add(folio, nr); in __filemap_add_folio()
868 folio->mapping = mapping; in __filemap_add_folio()
869 folio->index = xas.xa_index; in __filemap_add_folio()
875 if (order > folio_order(folio)) in __filemap_add_folio()
892 if (order > folio_order(folio)) { in __filemap_add_folio()
900 xas_store(&xas, folio); in __filemap_add_folio()
908 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); in __filemap_add_folio()
909 if (folio_test_pmd_mappable(folio)) in __filemap_add_folio()
910 __lruvec_stat_mod_folio(folio, in __filemap_add_folio()
920 trace_mm_filemap_add_to_page_cache(folio); in __filemap_add_folio()
924 mem_cgroup_uncharge(folio); in __filemap_add_folio()
925 folio->mapping = NULL; in __filemap_add_folio()
927 folio_put_refs(folio, nr); in __filemap_add_folio()
932 int filemap_add_folio(struct address_space *mapping, struct folio *folio, in filemap_add_folio() argument
938 __folio_set_locked(folio); in filemap_add_folio()
939 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); in filemap_add_folio()
941 __folio_clear_locked(folio); in filemap_add_folio()
951 WARN_ON_ONCE(folio_test_active(folio)); in filemap_add_folio()
953 workingset_refault(folio, shadow); in filemap_add_folio()
954 folio_add_lru(folio); in filemap_add_folio()
961 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) in filemap_alloc_folio()
964 struct folio *folio; in filemap_alloc_folio() local
971 folio = __folio_alloc_node(gfp, order, n); in filemap_alloc_folio()
972 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); in filemap_alloc_folio()
974 return folio; in filemap_alloc_folio()
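filemap_alloc_folio() pairs naturally with filemap_add_folio() above. A hedged sketch of the common allocate-then-insert pattern (helper name and error handling are illustrative):

static struct folio *example_alloc_and_add(struct address_space *mapping,
					   pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct folio *folio = filemap_alloc_folio(gfp, 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);
	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {			/* e.g. -EEXIST on a racing insertion, or -ENOMEM */
		folio_put(folio);
		return ERR_PTR(err);
	}
	return folio;			/* returned locked and already on the LRU */
}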
1033 static wait_queue_head_t *folio_waitqueue(struct folio *folio) in folio_waitqueue() argument
1035 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; in folio_waitqueue()
1098 if (test_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1101 if (test_and_set_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1133 static void folio_wake_bit(struct folio *folio, int bit_nr) in folio_wake_bit() argument
1135 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wake_bit()
1140 key.folio = folio; in folio_wake_bit()
1175 folio_clear_waiters(folio); in folio_wake_bit()
1180 static void folio_wake(struct folio *folio, int bit) in folio_wake() argument
1182 if (!folio_test_waiters(folio)) in folio_wake()
1184 folio_wake_bit(folio, bit); in folio_wake()
1206 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr, in folio_trylock_flag() argument
1210 if (test_and_set_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1212 } else if (test_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1222 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, in folio_wait_bit_common() argument
1225 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wait_bit_common()
1234 !folio_test_uptodate(folio) && folio_test_workingset(folio)) { in folio_wait_bit_common()
1242 wait_page.folio = folio; in folio_wait_bit_common()
1268 folio_set_waiters(folio); in folio_wait_bit_common()
1269 if (!folio_trylock_flag(folio, bit_nr, wait)) in folio_wait_bit_common()
1282 folio_put(folio); in folio_wait_bit_common()
1319 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0)))) in folio_wait_bit_common()
1384 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry)); in migration_entry_wait_on_locked() local
1386 q = folio_waitqueue(folio); in migration_entry_wait_on_locked()
1387 if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) { in migration_entry_wait_on_locked()
1395 wait_page.folio = folio; in migration_entry_wait_on_locked()
1400 folio_set_waiters(folio); in migration_entry_wait_on_locked()
1401 if (!folio_trylock_flag(folio, PG_locked, wait)) in migration_entry_wait_on_locked()
1438 void folio_wait_bit(struct folio *folio, int bit_nr) in folio_wait_bit() argument
1440 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in folio_wait_bit()
1444 int folio_wait_bit_killable(struct folio *folio, int bit_nr) in folio_wait_bit_killable() argument
1446 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED); in folio_wait_bit_killable()
1463 static int folio_put_wait_locked(struct folio *folio, int state) in folio_put_wait_locked() argument
1465 return folio_wait_bit_common(folio, PG_locked, state, DROP); in folio_put_wait_locked()
1475 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) in folio_add_wait_queue() argument
1477 wait_queue_head_t *q = folio_waitqueue(folio); in folio_add_wait_queue()
1482 folio_set_waiters(folio); in folio_add_wait_queue()
1519 void folio_unlock(struct folio *folio) in folio_unlock() argument
1524 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_unlock()
1525 if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0))) in folio_unlock()
1526 folio_wake_bit(folio, PG_locked); in folio_unlock()
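folio_wait_bit_common() and folio_unlock() are the sleep and wake halves of the folio lock. The canonical pairing, sketched (the caller is illustrative, not from filemap.c):

static void example_with_folio_locked(struct folio *folio)
{
	folio_lock(folio);	/* may sleep in folio_wait_bit_common(folio, PG_locked, ...) */
	/* ... folio->mapping and most flags are stable here ... */
	folio_unlock(folio);	/* clear_bit_unlock() plus folio_wake_bit(folio, PG_locked) */
}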
1541 void folio_end_private_2(struct folio *folio) in folio_end_private_2() argument
1543 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio); in folio_end_private_2()
1544 clear_bit_unlock(PG_private_2, folio_flags(folio, 0)); in folio_end_private_2()
1545 folio_wake_bit(folio, PG_private_2); in folio_end_private_2()
1546 folio_put(folio); in folio_end_private_2()
1556 void folio_wait_private_2(struct folio *folio) in folio_wait_private_2() argument
1558 while (folio_test_private_2(folio)) in folio_wait_private_2()
1559 folio_wait_bit(folio, PG_private_2); in folio_wait_private_2()
1574 int folio_wait_private_2_killable(struct folio *folio) in folio_wait_private_2_killable() argument
1578 while (folio_test_private_2(folio)) { in folio_wait_private_2_killable()
1579 ret = folio_wait_bit_killable(folio, PG_private_2); in folio_wait_private_2_killable()
1592 void folio_end_writeback(struct folio *folio) in folio_end_writeback() argument
1601 if (folio_test_reclaim(folio)) { in folio_end_writeback()
1602 folio_clear_reclaim(folio); in folio_end_writeback()
1603 folio_rotate_reclaimable(folio); in folio_end_writeback()
1612 folio_get(folio); in folio_end_writeback()
1613 if (!__folio_end_writeback(folio)) in folio_end_writeback()
1617 folio_wake(folio, PG_writeback); in folio_end_writeback()
1618 acct_reclaim_writeback(folio); in folio_end_writeback()
1619 folio_put(folio); in folio_end_writeback()
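folio_end_writeback() is what an I/O completion path calls once the last write to a folio finishes. A hedged sketch of such a completion handler (this function is illustrative, not the real iomap or buffer-head completion):

static void example_write_end_io(struct folio *folio, int err)
{
	if (err)
		mapping_set_error(folio->mapping, err);	/* record the error for fsync() reporting */
	folio_end_writeback(folio);	/* clears PG_writeback, wakes waiters, accounts reclaim */
}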
1627 void __folio_lock(struct folio *folio) in __folio_lock() argument
1629 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, in __folio_lock()
1634 int __folio_lock_killable(struct folio *folio) in __folio_lock_killable() argument
1636 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, in __folio_lock_killable()
1641 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) in __folio_lock_async() argument
1643 struct wait_queue_head *q = folio_waitqueue(folio); in __folio_lock_async()
1646 wait->folio = folio; in __folio_lock_async()
1651 folio_set_waiters(folio); in __folio_lock_async()
1652 ret = !folio_trylock(folio); in __folio_lock_async()
1678 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) in __folio_lock_or_retry() argument
1692 folio_wait_locked_killable(folio); in __folio_lock_or_retry()
1694 folio_wait_locked(folio); in __folio_lock_or_retry()
1700 ret = __folio_lock_killable(folio); in __folio_lock_or_retry()
1706 __folio_lock(folio); in __folio_lock_or_retry()
1819 struct folio *folio; in filemap_get_entry() local
1824 folio = xas_load(&xas); in filemap_get_entry()
1825 if (xas_retry(&xas, folio)) in filemap_get_entry()
1831 if (!folio || xa_is_value(folio)) in filemap_get_entry()
1834 if (!folio_try_get_rcu(folio)) in filemap_get_entry()
1837 if (unlikely(folio != xas_reload(&xas))) { in filemap_get_entry()
1838 folio_put(folio); in filemap_get_entry()
1844 return folio; in filemap_get_entry()
1863 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, in __filemap_get_folio()
1866 struct folio *folio; in __filemap_get_folio() local
1869 folio = filemap_get_entry(mapping, index); in __filemap_get_folio()
1870 if (xa_is_value(folio)) in __filemap_get_folio()
1871 folio = NULL; in __filemap_get_folio()
1872 if (!folio) in __filemap_get_folio()
1877 if (!folio_trylock(folio)) { in __filemap_get_folio()
1878 folio_put(folio); in __filemap_get_folio()
1882 folio_lock(folio); in __filemap_get_folio()
1886 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1887 folio_unlock(folio); in __filemap_get_folio()
1888 folio_put(folio); in __filemap_get_folio()
1891 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in __filemap_get_folio()
1895 folio_mark_accessed(folio); in __filemap_get_folio()
1898 if (folio_test_idle(folio)) in __filemap_get_folio()
1899 folio_clear_idle(folio); in __filemap_get_folio()
1903 folio_wait_stable(folio); in __filemap_get_folio()
1905 if (!folio && (fgp_flags & FGP_CREAT)) { in __filemap_get_folio()
1936 folio = filemap_alloc_folio(alloc_gfp, order); in __filemap_get_folio()
1937 if (!folio) in __filemap_get_folio()
1942 __folio_set_referenced(folio); in __filemap_get_folio()
1944 err = filemap_add_folio(mapping, folio, index, gfp); in __filemap_get_folio()
1947 folio_put(folio); in __filemap_get_folio()
1948 folio = NULL; in __filemap_get_folio()
1959 if (folio && (fgp_flags & FGP_FOR_MMAP)) in __filemap_get_folio()
1960 folio_unlock(folio); in __filemap_get_folio()
1963 if (!folio) in __filemap_get_folio()
1965 return folio; in __filemap_get_folio()
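__filemap_get_folio() is the find-or-create front end of the page cache. A sketch of a typical locked lookup, as a buffered-write path might do it (the FGP flag mix and wrapper are illustrative):

static int example_locked_lookup(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_WRITE | FGP_CREAT,
			mapping_gfp_mask(mapping));

	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* ERR_PTR(-ENOMEM) under memory pressure */
	/* locked and referenced; FGP_STABLE would additionally folio_wait_stable() */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}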
1969 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry()
1972 struct folio *folio; in find_get_entry() local
1976 folio = xas_find(xas, max); in find_get_entry()
1978 folio = xas_find_marked(xas, max, mark); in find_get_entry()
1980 if (xas_retry(xas, folio)) in find_get_entry()
1987 if (!folio || xa_is_value(folio)) in find_get_entry()
1988 return folio; in find_get_entry()
1990 if (!folio_try_get_rcu(folio)) in find_get_entry()
1993 if (unlikely(folio != xas_reload(xas))) { in find_get_entry()
1994 folio_put(folio); in find_get_entry()
1998 return folio; in find_get_entry()
2028 struct folio *folio; in find_get_entries() local
2031 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in find_get_entries()
2033 if (!folio_batch_add(fbatch, folio)) in find_get_entries()
2042 folio = fbatch->folios[idx]; in find_get_entries()
2043 if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) in find_get_entries()
2044 nr = folio_nr_pages(folio); in find_get_entries()
2074 struct folio *folio; in find_lock_entries() local
2077 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2078 if (!xa_is_value(folio)) { in find_lock_entries()
2079 if (folio->index < *start) in find_lock_entries()
2081 if (folio_next_index(folio) - 1 > end) in find_lock_entries()
2083 if (!folio_trylock(folio)) in find_lock_entries()
2085 if (folio->mapping != mapping || in find_lock_entries()
2086 folio_test_writeback(folio)) in find_lock_entries()
2088 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), in find_lock_entries()
2089 folio); in find_lock_entries()
2092 if (!folio_batch_add(fbatch, folio)) in find_lock_entries()
2096 folio_unlock(folio); in find_lock_entries()
2098 folio_put(folio); in find_lock_entries()
2106 folio = fbatch->folios[idx]; in find_lock_entries()
2107 if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) in find_lock_entries()
2108 nr = folio_nr_pages(folio); in find_lock_entries()
2139 struct folio *folio; in filemap_get_folios() local
2142 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in filemap_get_folios()
2144 if (xa_is_value(folio)) in filemap_get_folios()
2146 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios()
2147 unsigned long nr = folio_nr_pages(folio); in filemap_get_folios()
2149 if (folio_test_hugetlb(folio)) in filemap_get_folios()
2151 *start = folio->index + nr; in filemap_get_folios()
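filemap_get_folios() fills a folio_batch and advances *start past what it returned. The canonical walk over a range looks like this sketch (the loop body is illustrative):

static void example_walk_range(struct address_space *mapping, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t start = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			/* referenced but unlocked; lock or recheck mapping as needed */
		}
		folio_batch_release(&fbatch);	/* drops the batch's references */
		cond_resched();
	}
}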
2193 struct folio *folio; in filemap_get_folios_contig() local
2197 for (folio = xas_load(&xas); folio && xas.xa_index <= end; in filemap_get_folios_contig()
2198 folio = xas_next(&xas)) { in filemap_get_folios_contig()
2199 if (xas_retry(&xas, folio)) in filemap_get_folios_contig()
2205 if (xa_is_value(folio)) in filemap_get_folios_contig()
2208 if (!folio_try_get_rcu(folio)) in filemap_get_folios_contig()
2211 if (unlikely(folio != xas_reload(&xas))) in filemap_get_folios_contig()
2214 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios_contig()
2215 nr = folio_nr_pages(folio); in filemap_get_folios_contig()
2217 if (folio_test_hugetlb(folio)) in filemap_get_folios_contig()
2219 *start = folio->index + nr; in filemap_get_folios_contig()
2224 folio_put(folio); in filemap_get_folios_contig()
2234 folio = fbatch->folios[nr - 1]; in filemap_get_folios_contig()
2235 if (folio_test_hugetlb(folio)) in filemap_get_folios_contig()
2236 *start = folio->index + 1; in filemap_get_folios_contig()
2238 *start = folio_next_index(folio); in filemap_get_folios_contig()
2263 struct folio *folio; in filemap_get_folios_tag() local
2266 while ((folio = find_get_entry(&xas, end, tag)) != NULL) { in filemap_get_folios_tag()
2272 if (xa_is_value(folio)) in filemap_get_folios_tag()
2274 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios_tag()
2275 unsigned long nr = folio_nr_pages(folio); in filemap_get_folios_tag()
2277 if (folio_test_hugetlb(folio)) in filemap_get_folios_tag()
2279 *start = folio->index + nr; in filemap_get_folios_tag()
2333 struct folio *folio; in filemap_get_read_batch() local
2336 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { in filemap_get_read_batch()
2337 if (xas_retry(&xas, folio)) in filemap_get_read_batch()
2339 if (xas.xa_index > max || xa_is_value(folio)) in filemap_get_read_batch()
2341 if (xa_is_sibling(folio)) in filemap_get_read_batch()
2343 if (!folio_try_get_rcu(folio)) in filemap_get_read_batch()
2346 if (unlikely(folio != xas_reload(&xas))) in filemap_get_read_batch()
2349 if (!folio_batch_add(fbatch, folio)) in filemap_get_read_batch()
2351 if (!folio_test_uptodate(folio)) in filemap_get_read_batch()
2353 if (folio_test_readahead(folio)) in filemap_get_read_batch()
2355 xas_advance(&xas, folio_next_index(folio) - 1); in filemap_get_read_batch()
2358 folio_put(folio); in filemap_get_read_batch()
2366 struct folio *folio) in filemap_read_folio() argument
2368 bool workingset = folio_test_workingset(folio); in filemap_read_folio()
2377 folio_clear_error(folio); in filemap_read_folio()
2382 error = filler(file, folio); in filemap_read_folio()
2388 error = folio_wait_locked_killable(folio); in filemap_read_folio()
2391 if (folio_test_uptodate(folio)) in filemap_read_folio()
2399 loff_t pos, size_t count, struct folio *folio, in filemap_range_uptodate() argument
2402 if (folio_test_uptodate(folio)) in filemap_range_uptodate()
2409 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2412 if (folio_pos(folio) > pos) { in filemap_range_uptodate()
2413 count -= folio_pos(folio) - pos; in filemap_range_uptodate()
2416 pos -= folio_pos(folio); in filemap_range_uptodate()
2419 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2424 struct folio *folio, bool need_uptodate) in filemap_update_page() argument
2435 if (!folio_trylock(folio)) { in filemap_update_page()
2445 folio_put_wait_locked(folio, TASK_KILLABLE); in filemap_update_page()
2448 error = __folio_lock_async(folio, iocb->ki_waitq); in filemap_update_page()
2454 if (!folio->mapping) in filemap_update_page()
2458 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, in filemap_update_page()
2467 folio); in filemap_update_page()
2470 folio_unlock(folio); in filemap_update_page()
2474 folio_put(folio); in filemap_update_page()
2482 struct folio *folio; in filemap_create_folio() local
2485 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); in filemap_create_folio()
2486 if (!folio) in filemap_create_folio()
2503 error = filemap_add_folio(mapping, folio, index, in filemap_create_folio()
2510 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_create_folio()
2515 folio_batch_add(fbatch, folio); in filemap_create_folio()
2519 folio_put(folio); in filemap_create_folio()
2524 struct address_space *mapping, struct folio *folio, in filemap_readahead() argument
2527 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2531 page_cache_async_ra(&ractl, folio, last_index - folio->index); in filemap_readahead()
2543 struct folio *folio; in filemap_get_pages() local
2570 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2571 if (folio_test_readahead(folio)) { in filemap_get_pages()
2572 err = filemap_readahead(iocb, filp, mapping, folio, last_index); in filemap_get_pages()
2576 if (!folio_test_uptodate(folio)) { in filemap_get_pages()
2580 err = filemap_update_page(iocb, mapping, count, folio, in filemap_get_pages()
2589 folio_put(folio); in filemap_get_pages()
2597 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) in pos_same_folio() argument
2599 unsigned int shift = folio_shift(folio); in pos_same_folio()
2684 struct folio *folio = fbatch.folios[i]; in filemap_read() local
2685 size_t fsize = folio_size(folio); in filemap_read()
2691 if (end_offset < folio_pos(folio)) in filemap_read()
2694 folio_mark_accessed(folio); in filemap_read()
2701 flush_dcache_folio(folio); in filemap_read()
2703 copied = copy_folio_to_iter(folio, offset, bytes, iter); in filemap_read()
2839 struct folio *folio, loff_t fpos, size_t size) in splice_folio_into_pipe() argument
2842 size_t spliced = 0, offset = offset_in_folio(folio, fpos); in splice_folio_into_pipe()
2844 page = folio_page(folio, offset / PAGE_SIZE); in splice_folio_into_pipe()
2845 size = min(size, folio_size(folio) - offset); in splice_folio_into_pipe()
2859 folio_get(folio); in splice_folio_into_pipe()
2943 struct folio *folio = fbatch.folios[i]; in filemap_splice_read() local
2946 if (folio_pos(folio) >= end_offset) in filemap_splice_read()
2948 folio_mark_accessed(folio); in filemap_splice_read()
2956 flush_dcache_folio(folio); in filemap_splice_read()
2959 n = splice_folio_into_pipe(pipe, folio, *ppos, n); in filemap_splice_read()
2982 struct address_space *mapping, struct folio *folio, in folio_seek_hole_data() argument
2988 if (xa_is_value(folio) || folio_test_uptodate(folio)) in folio_seek_hole_data()
2995 folio_lock(folio); in folio_seek_hole_data()
2996 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
2999 offset = offset_in_folio(folio, start) & ~(bsz - 1); in folio_seek_hole_data()
3002 if (ops->is_partially_uptodate(folio, offset, bsz) == in folio_seek_hole_data()
3007 } while (offset < folio_size(folio)); in folio_seek_hole_data()
3009 folio_unlock(folio); in folio_seek_hole_data()
3014 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) in seek_folio_size() argument
3016 if (xa_is_value(folio)) in seek_folio_size()
3018 return folio_size(folio); in seek_folio_size()
3045 struct folio *folio; in mapping_seek_hole_data() local
3051 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
3061 seek_size = seek_folio_size(&xas, folio); in mapping_seek_hole_data()
3063 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
3071 if (!xa_is_value(folio)) in mapping_seek_hole_data()
3072 folio_put(folio); in mapping_seek_hole_data()
3078 if (folio && !xa_is_value(folio)) in mapping_seek_hole_data()
3079 folio_put(folio); in mapping_seek_hole_data()
3099 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
3102 if (folio_trylock(folio)) in lock_folio_maybe_drop_mmap()
3115 if (__folio_lock_killable(folio)) { in lock_folio_maybe_drop_mmap()
3127 __folio_lock(folio); in lock_folio_maybe_drop_mmap()
3209 struct folio *folio) in do_async_mmap_readahead() argument
3225 if (folio_test_readahead(folio)) { in do_async_mmap_readahead()
3227 page_cache_async_ra(&ractl, folio, ra->ra_pages); in do_async_mmap_readahead()
3263 struct folio *folio; in filemap_fault() local
3274 folio = filemap_get_folio(mapping, index); in filemap_fault()
3275 if (likely(!IS_ERR(folio))) { in filemap_fault()
3281 fpin = do_async_mmap_readahead(vmf, folio); in filemap_fault()
3282 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3301 folio = __filemap_get_folio(mapping, index, in filemap_fault()
3304 if (IS_ERR(folio)) { in filemap_fault()
3312 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) in filemap_fault()
3316 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3317 folio_unlock(folio); in filemap_fault()
3318 folio_put(folio); in filemap_fault()
3321 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in filemap_fault()
3327 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3335 folio_unlock(folio); in filemap_fault()
3336 folio_put(folio); in filemap_fault()
3348 folio_unlock(folio); in filemap_fault()
3360 folio_unlock(folio); in filemap_fault()
3361 folio_put(folio); in filemap_fault()
3365 vmf->page = folio_file_page(folio, index); in filemap_fault()
3376 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3379 folio_put(folio); in filemap_fault()
3393 if (!IS_ERR(folio)) in filemap_fault()
3394 folio_put(folio); in filemap_fault()
3403 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, in filemap_map_pmd() argument
3410 folio_unlock(folio); in filemap_map_pmd()
3411 folio_put(folio); in filemap_map_pmd()
3415 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3416 struct page *page = folio_file_page(folio, start); in filemap_map_pmd()
3420 folio_unlock(folio); in filemap_map_pmd()
3431 static struct folio *next_uptodate_folio(struct xa_state *xas, in next_uptodate_folio()
3434 struct folio *folio = xas_next_entry(xas, end_pgoff); in next_uptodate_folio() local
3438 if (!folio) in next_uptodate_folio()
3440 if (xas_retry(xas, folio)) in next_uptodate_folio()
3442 if (xa_is_value(folio)) in next_uptodate_folio()
3444 if (folio_test_locked(folio)) in next_uptodate_folio()
3446 if (!folio_try_get_rcu(folio)) in next_uptodate_folio()
3449 if (unlikely(folio != xas_reload(xas))) in next_uptodate_folio()
3451 if (!folio_test_uptodate(folio) || folio_test_readahead(folio)) in next_uptodate_folio()
3453 if (!folio_trylock(folio)) in next_uptodate_folio()
3455 if (folio->mapping != mapping) in next_uptodate_folio()
3457 if (!folio_test_uptodate(folio)) in next_uptodate_folio()
3462 return folio; in next_uptodate_folio()
3464 folio_unlock(folio); in next_uptodate_folio()
3466 folio_put(folio); in next_uptodate_folio()
3467 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_folio()
3477 struct folio *folio, unsigned long start, in filemap_map_folio_range() argument
3482 struct page *page = folio_page(folio, start); in filemap_map_folio_range()
3504 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3505 folio_ref_add(folio, count); in filemap_map_folio_range()
3518 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3519 folio_ref_add(folio, count); in filemap_map_folio_range()
3530 struct folio *folio, unsigned long addr, in filemap_map_order0_folio() argument
3534 struct page *page = &folio->page; in filemap_map_order0_folio()
3552 set_pte_range(vmf, folio, page, 1, addr); in filemap_map_order0_folio()
3553 folio_ref_inc(folio); in filemap_map_order0_folio()
3567 struct folio *folio; in filemap_map_pages() local
3572 folio = next_uptodate_folio(&xas, mapping, end_pgoff); in filemap_map_pages()
3573 if (!folio) in filemap_map_pages()
3576 if (filemap_map_pmd(vmf, folio, start_pgoff)) { in filemap_map_pages()
3584 folio_unlock(folio); in filemap_map_pages()
3585 folio_put(folio); in filemap_map_pages()
3594 end = folio->index + folio_nr_pages(folio) - 1; in filemap_map_pages()
3597 if (!folio_test_large(folio)) in filemap_map_pages()
3599 folio, addr, &mmap_miss); in filemap_map_pages()
3601 ret |= filemap_map_folio_range(vmf, folio, in filemap_map_pages()
3602 xas.xa_index - folio->index, addr, in filemap_map_pages()
3605 folio_unlock(folio); in filemap_map_pages()
3606 folio_put(folio); in filemap_map_pages()
3607 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); in filemap_map_pages()
3625 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite() local
3630 folio_lock(folio); in filemap_page_mkwrite()
3631 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3632 folio_unlock(folio); in filemap_page_mkwrite()
3641 folio_mark_dirty(folio); in filemap_page_mkwrite()
3642 folio_wait_stable(folio); in filemap_page_mkwrite()
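filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() above are exactly the three handlers a filesystem exposes for shared mmap; mm/filemap.c wires them together in generic_file_vm_ops, roughly:

static const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};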
3695 static struct folio *do_read_cache_folio(struct address_space *mapping, in do_read_cache_folio()
3698 struct folio *folio; in do_read_cache_folio() local
3704 folio = filemap_get_folio(mapping, index); in do_read_cache_folio()
3705 if (IS_ERR(folio)) { in do_read_cache_folio()
3706 folio = filemap_alloc_folio(gfp, 0); in do_read_cache_folio()
3707 if (!folio) in do_read_cache_folio()
3709 err = filemap_add_folio(mapping, folio, index, gfp); in do_read_cache_folio()
3711 folio_put(folio); in do_read_cache_folio()
3720 if (folio_test_uptodate(folio)) in do_read_cache_folio()
3723 if (!folio_trylock(folio)) { in do_read_cache_folio()
3724 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE); in do_read_cache_folio()
3729 if (!folio->mapping) { in do_read_cache_folio()
3730 folio_unlock(folio); in do_read_cache_folio()
3731 folio_put(folio); in do_read_cache_folio()
3736 if (folio_test_uptodate(folio)) { in do_read_cache_folio()
3737 folio_unlock(folio); in do_read_cache_folio()
3742 err = filemap_read_folio(file, filler, folio); in do_read_cache_folio()
3744 folio_put(folio); in do_read_cache_folio()
3751 folio_mark_accessed(folio); in do_read_cache_folio()
3752 return folio; in do_read_cache_folio()
3771 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, in read_cache_folio()
3796 struct folio *mapping_read_folio_gfp(struct address_space *mapping, in mapping_read_folio_gfp()
3806 struct folio *folio; in do_read_cache_page() local
3808 folio = do_read_cache_folio(mapping, index, filler, file, gfp); in do_read_cache_page()
3809 if (IS_ERR(folio)) in do_read_cache_page()
3810 return &folio->page; in do_read_cache_page()
3811 return folio_file_page(folio, index); in do_read_cache_page()
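do_read_cache_folio() backs read_cache_folio(), mapping_read_folio_gfp() and do_read_cache_page(). A sketch of the consumer side (wrapper and error handling are illustrative):

static int example_read_one(struct address_space *mapping, pgoff_t index,
			    struct file *file)
{
	struct folio *folio = read_cache_folio(mapping, index, NULL, file);

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/* uptodate and referenced, not locked; a NULL filler means a_ops->read_folio */
	folio_put(folio);
	return 0;
}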
4116 bool filemap_release_folio(struct folio *folio, gfp_t gfp) in filemap_release_folio() argument
4118 struct address_space * const mapping = folio->mapping; in filemap_release_folio()
4120 BUG_ON(!folio_test_locked(folio)); in filemap_release_folio()
4121 if (!folio_needs_release(folio)) in filemap_release_folio()
4123 if (folio_test_writeback(folio)) in filemap_release_folio()
4127 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
4128 return try_to_free_buffers(folio); in filemap_release_folio()
4149 struct folio *folio; in filemap_cachestat() local
4152 xas_for_each(&xas, folio, last_index) { in filemap_cachestat()
4156 if (xas_retry(&xas, folio)) in filemap_cachestat()
4159 if (xa_is_value(folio)) { in filemap_cachestat()
4161 void *shadow = (void *)folio; in filemap_cachestat()
4181 swp_entry_t swp = radix_to_swp_entry(folio); in filemap_cachestat()
4192 nr_pages = folio_nr_pages(folio); in filemap_cachestat()
4193 folio_first_index = folio_pgoff(folio); in filemap_cachestat()
4206 if (folio_test_dirty(folio)) in filemap_cachestat()
4209 if (folio_test_writeback(folio)) in filemap_cachestat()