Lines matching refs:page (references to the identifier "page" in mm/filemap.c)
120 struct page *page, void *shadow) in page_cache_delete() argument
122 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
128 if (!PageHuge(page)) { in page_cache_delete()
129 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
130 nr = compound_nr(page); in page_cache_delete()
133 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
134 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
135 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
140 page->mapping = NULL; in page_cache_delete()
157 struct page *page) in unaccount_page_cache_page() argument
166 if (PageUptodate(page) && PageMappedToDisk(page)) in unaccount_page_cache_page()
167 cleancache_put_page(page); in unaccount_page_cache_page()
169 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
171 VM_BUG_ON_PAGE(PageTail(page), page); in unaccount_page_cache_page()
172 VM_BUG_ON_PAGE(page_mapped(page), page); in unaccount_page_cache_page()
173 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { in unaccount_page_cache_page()
177 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
178 dump_page(page, "still mapped when deleted"); in unaccount_page_cache_page()
182 mapcount = page_mapcount(page); in unaccount_page_cache_page()
184 page_count(page) >= mapcount + 2) { in unaccount_page_cache_page()
191 page_mapcount_reset(page); in unaccount_page_cache_page()
192 page_ref_sub(page, mapcount); in unaccount_page_cache_page()
197 if (PageHuge(page)) in unaccount_page_cache_page()
200 nr = hpage_nr_pages(page); in unaccount_page_cache_page()
202 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
203 if (PageSwapBacked(page)) { in unaccount_page_cache_page()
204 __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); in unaccount_page_cache_page()
205 if (PageTransHuge(page)) in unaccount_page_cache_page()
206 __dec_node_page_state(page, NR_SHMEM_THPS); in unaccount_page_cache_page()
207 } else if (PageTransHuge(page)) { in unaccount_page_cache_page()
208 __dec_node_page_state(page, NR_FILE_THPS); in unaccount_page_cache_page()
222 if (WARN_ON_ONCE(PageDirty(page))) in unaccount_page_cache_page()
223 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
231 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
233 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
235 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
237 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
238 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
242 struct page *page) in page_cache_free_page() argument
244 void (*freepage)(struct page *); in page_cache_free_page()
248 freepage(page); in page_cache_free_page()
250 if (PageTransHuge(page) && !PageHuge(page)) { in page_cache_free_page()
251 page_ref_sub(page, HPAGE_PMD_NR); in page_cache_free_page()
252 VM_BUG_ON_PAGE(page_count(page) <= 0, page); in page_cache_free_page()
254 put_page(page); in page_cache_free_page()
266 void delete_from_page_cache(struct page *page) in delete_from_page_cache() argument
268 struct address_space *mapping = page_mapping(page); in delete_from_page_cache()
271 BUG_ON(!PageLocked(page)); in delete_from_page_cache()
273 __delete_from_page_cache(page, NULL); in delete_from_page_cache()
276 page_cache_free_page(mapping, page); in delete_from_page_cache()
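
The functions above form the removal path: delete_from_page_cache() takes a locked page, __delete_from_page_cache() unaccounts it and erases it from the XArray, and page_cache_free_page() runs ->freepage and drops the reference the cache held. A minimal caller sketch (evict_locked_page() is a hypothetical name; it assumes the page is locked, clean, and attached to a mapping):

        #include <linux/mm.h>
        #include <linux/pagemap.h>

        /* Sketch: drop one locked, clean page from the page cache.
         * delete_from_page_cache() itself releases the cache's reference. */
        static void evict_locked_page(struct page *page)
        {
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                delete_from_page_cache(page);
                unlock_page(page);
        }
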
300 struct page *page; in page_cache_delete_batch() local
303 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
308 if (xa_is_value(page)) in page_cache_delete_batch()
317 if (page != pvec->pages[i]) { in page_cache_delete_batch()
318 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
319 page); in page_cache_delete_batch()
323 WARN_ON_ONCE(!PageLocked(page)); in page_cache_delete_batch()
325 if (page->index == xas.xa_index) in page_cache_delete_batch()
326 page->mapping = NULL; in page_cache_delete_batch()
334 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
475 struct page *page; in filemap_range_has_page() local
484 page = xas_find(&xas, max); in filemap_range_has_page()
485 if (xas_retry(&xas, page)) in filemap_range_has_page()
488 if (xa_is_value(page)) in filemap_range_has_page()
499 return page != NULL; in filemap_range_has_page()
524 struct page *page = pvec.pages[i]; in __filemap_fdatawait_range() local
526 wait_on_page_writeback(page); in __filemap_fdatawait_range()
527 ClearPageError(page); in __filemap_fdatawait_range()
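
filemap_range_has_page() and __filemap_fdatawait_range() are the probe and wait halves of the data-writeback path. A hedged sketch of the usual consumer, a filesystem ->fsync going through the exported filemap_write_and_wait_range() wrapper (myfs_fsync() is hypothetical):

        #include <linux/fs.h>

        /* Hypothetical ->fsync built on the wrappers over these helpers. */
        static int myfs_fsync(struct file *file, loff_t start, loff_t end,
                              int datasync)
        {
                int err = filemap_write_and_wait_range(file->f_mapping,
                                                       start, end);
                if (err)
                        return err;
                /* a real implementation would flush metadata here */
                return 0;
        }
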
811 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page()
814 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
850 static int __add_to_page_cache_locked(struct page *page, in __add_to_page_cache_locked() argument
856 int huge = PageHuge(page); in __add_to_page_cache_locked()
861 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_page_cache_locked()
862 VM_BUG_ON_PAGE(PageSwapBacked(page), page); in __add_to_page_cache_locked()
866 error = mem_cgroup_try_charge(page, current->mm, in __add_to_page_cache_locked()
872 get_page(page); in __add_to_page_cache_locked()
873 page->mapping = mapping; in __add_to_page_cache_locked()
874 page->index = offset; in __add_to_page_cache_locked()
881 xas_store(&xas, page); in __add_to_page_cache_locked()
894 __inc_node_page_state(page, NR_FILE_PAGES); in __add_to_page_cache_locked()
903 mem_cgroup_commit_charge(page, memcg, false, false); in __add_to_page_cache_locked()
904 trace_mm_filemap_add_to_page_cache(page); in __add_to_page_cache_locked()
907 page->mapping = NULL; in __add_to_page_cache_locked()
910 mem_cgroup_cancel_charge(page, memcg, false); in __add_to_page_cache_locked()
911 put_page(page); in __add_to_page_cache_locked()
928 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
931 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
936 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
942 __SetPageLocked(page); in add_to_page_cache_lru()
943 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
946 __ClearPageLocked(page); in add_to_page_cache_lru()
956 WARN_ON_ONCE(PageActive(page)); in add_to_page_cache_lru()
958 workingset_refault(page, shadow); in add_to_page_cache_lru()
959 lru_cache_add(page); in add_to_page_cache_lru()
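
add_to_page_cache_lru() locks the new page, inserts it, replays any workingset shadow via workingset_refault(), and puts it on the LRU; on success the page comes back still locked. A sketch of the common miss path, assuming the caller will issue the read itself (cache_alloc_and_add() is a hypothetical helper):

        #include <linux/err.h>
        #include <linux/pagemap.h>

        /* Sketch: allocate, insert, hand back a locked page for read-in. */
        static struct page *cache_alloc_and_add(struct address_space *mapping,
                                                pgoff_t index, gfp_t gfp)
        {
                struct page *page = __page_cache_alloc(gfp);
                int err;

                if (!page)
                        return ERR_PTR(-ENOMEM);
                err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (err) {
                        put_page(page); /* -EEXIST: somebody won the race */
                        return ERR_PTR(err);
                }
                return page;            /* still locked; ->readpage unlocks */
        }
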
966 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
969 struct page *page; in __page_cache_alloc() local
976 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
977 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); in __page_cache_alloc()
979 return page; in __page_cache_alloc()
1000 static wait_queue_head_t *page_waitqueue(struct page *page) in page_waitqueue() argument
1002 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; in page_waitqueue()
1017 struct page *page; member
1023 struct page *page; member
1034 if (wait_page->page != key->page) in wake_page_function()
1049 if (test_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1055 static void wake_up_page_bit(struct page *page, int bit_nr) in wake_up_page_bit() argument
1057 wait_queue_head_t *q = page_waitqueue(page); in wake_up_page_bit()
1062 key.page = page; in wake_up_page_bit()
1097 ClearPageWaiters(page); in wake_up_page_bit()
1109 static void wake_up_page(struct page *page, int bit) in wake_up_page() argument
1111 if (!PageWaiters(page)) in wake_up_page()
1113 wake_up_page_bit(page, bit); in wake_up_page()
1132 struct page *page, int bit_nr, int state, enum behavior behavior) in wait_on_page_bit_common() argument
1143 !PageUptodate(page) && PageWorkingset(page)) { in wait_on_page_bit_common()
1144 if (!PageSwapBacked(page)) { in wait_on_page_bit_common()
1155 wait_page.page = page; in wait_on_page_bit_common()
1163 SetPageWaiters(page); in wait_on_page_bit_common()
1170 bit_is_set = test_bit(bit_nr, &page->flags); in wait_on_page_bit_common()
1172 put_page(page); in wait_on_page_bit_common()
1178 if (!test_and_set_bit_lock(bit_nr, &page->flags)) in wait_on_page_bit_common()
1181 if (!test_bit(bit_nr, &page->flags)) in wait_on_page_bit_common()
1221 void wait_on_page_bit(struct page *page, int bit_nr) in wait_on_page_bit() argument
1223 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit()
1224 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in wait_on_page_bit()
1228 int wait_on_page_bit_killable(struct page *page, int bit_nr) in wait_on_page_bit_killable() argument
1230 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit_killable()
1231 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); in wait_on_page_bit_killable()
1245 void put_and_wait_on_page_locked(struct page *page) in put_and_wait_on_page_locked() argument
1249 page = compound_head(page); in put_and_wait_on_page_locked()
1250 q = page_waitqueue(page); in put_and_wait_on_page_locked()
1251 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); in put_and_wait_on_page_locked()
1261 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) in add_page_wait_queue() argument
1263 wait_queue_head_t *q = page_waitqueue(page); in add_page_wait_queue()
1268 SetPageWaiters(page); in add_page_wait_queue()
1311 void unlock_page(struct page *page) in unlock_page() argument
1314 page = compound_head(page); in unlock_page()
1315 VM_BUG_ON_PAGE(!PageLocked(page), page); in unlock_page()
1316 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) in unlock_page()
1317 wake_up_page_bit(page, PG_locked); in unlock_page()
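
unlock_page() folds the clear of PG_locked and the test of PG_waiters into one atomic via clear_bit_unlock_is_negative_byte(), so the slow wake-up path only runs when a sleeper actually queued on the hashed waitqueue. A sketch of the pattern these primitives serve (work() is a hypothetical callback run under the page lock):

        #include <linux/pagemap.h>

        static int with_page_locked(struct page *page,
                                    int (*work)(struct page *))
        {
                int err;

                lock_page(page);        /* sleeps in __lock_page() if contended */
                err = work(page);
                unlock_page(page);      /* wakes PG_locked waiters if queued */
                return err;
        }
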
1325 void end_page_writeback(struct page *page) in end_page_writeback() argument
1334 if (PageReclaim(page)) { in end_page_writeback()
1335 ClearPageReclaim(page); in end_page_writeback()
1336 rotate_reclaimable_page(page); in end_page_writeback()
1339 if (!test_clear_page_writeback(page)) in end_page_writeback()
1343 wake_up_page(page, PG_writeback); in end_page_writeback()
1351 void page_endio(struct page *page, bool is_write, int err) in page_endio() argument
1355 SetPageUptodate(page); in page_endio()
1357 ClearPageUptodate(page); in page_endio()
1358 SetPageError(page); in page_endio()
1360 unlock_page(page); in page_endio()
1365 SetPageError(page); in page_endio()
1366 mapping = page_mapping(page); in page_endio()
1370 end_page_writeback(page); in page_endio()
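
page_endio() is the per-page tail of block I/O completion: reads get PG_uptodate (or PG_error) plus an unlock, writes get the mapping error recorded and writeback ended. A hedged sketch of a driver read completion using it (my_read_endio() is hypothetical):

        #include <linux/bio.h>
        #include <linux/pagemap.h>

        /* Hypothetical driver hook: finish each page of a read bio. */
        static void my_read_endio(struct bio *bio)
        {
                struct bio_vec *bvec;
                struct bvec_iter_all iter_all;

                bio_for_each_segment_all(bvec, bio, iter_all)
                        page_endio(bvec->bv_page, false /* is_write */,
                                   blk_status_to_errno(bio->bi_status));
                bio_put(bio);
        }
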
1379 void __lock_page(struct page *__page) in __lock_page()
1381 struct page *page = compound_head(__page); in __lock_page() local
1382 wait_queue_head_t *q = page_waitqueue(page); in __lock_page()
1383 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, in __lock_page()
1388 int __lock_page_killable(struct page *__page) in __lock_page_killable()
1390 struct page *page = compound_head(__page); in __lock_page_killable() local
1391 wait_queue_head_t *q = page_waitqueue(page); in __lock_page_killable()
1392 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, in __lock_page_killable()
1408 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, in __lock_page_or_retry() argument
1421 wait_on_page_locked_killable(page); in __lock_page_or_retry()
1423 wait_on_page_locked(page); in __lock_page_or_retry()
1429 ret = __lock_page_killable(page); in __lock_page_or_retry()
1435 __lock_page(page); in __lock_page_or_retry()
1525 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) in find_get_entry()
1528 struct page *page; in find_get_entry() local
1533 page = xas_load(&xas); in find_get_entry()
1534 if (xas_retry(&xas, page)) in find_get_entry()
1540 if (!page || xa_is_value(page)) in find_get_entry()
1543 if (!page_cache_get_speculative(page)) in find_get_entry()
1551 if (unlikely(page != xas_reload(&xas))) { in find_get_entry()
1552 put_page(page); in find_get_entry()
1555 page = find_subpage(page, offset); in find_get_entry()
1559 return page; in find_get_entry()
1579 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) in find_lock_entry()
1581 struct page *page; in find_lock_entry() local
1584 page = find_get_entry(mapping, offset); in find_lock_entry()
1585 if (page && !xa_is_value(page)) { in find_lock_entry()
1586 lock_page(page); in find_lock_entry()
1588 if (unlikely(page_mapping(page) != mapping)) { in find_lock_entry()
1589 unlock_page(page); in find_lock_entry()
1590 put_page(page); in find_lock_entry()
1593 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in find_lock_entry()
1595 return page; in find_lock_entry()
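
Both lookups may hand back a shadow entry rather than a real page, which is why find_lock_entry() only locks non-value results and why every caller of find_get_entry() must test xa_is_value() first. A sketch that filters shadows out (lookup_present_page() is a hypothetical name):

        #include <linux/pagemap.h>
        #include <linux/xarray.h>

        static struct page *lookup_present_page(struct address_space *mapping,
                                                pgoff_t index)
        {
                struct page *page = find_get_entry(mapping, index);

                if (xa_is_value(page))  /* shadow or swap entry, not a page */
                        return NULL;
                return page;            /* NULL, or page with a reference held */
        }
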
1629 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, in pagecache_get_page()
1632 struct page *page; in pagecache_get_page() local
1635 page = find_get_entry(mapping, offset); in pagecache_get_page()
1636 if (xa_is_value(page)) in pagecache_get_page()
1637 page = NULL; in pagecache_get_page()
1638 if (!page) in pagecache_get_page()
1643 if (!trylock_page(page)) { in pagecache_get_page()
1644 put_page(page); in pagecache_get_page()
1648 lock_page(page); in pagecache_get_page()
1652 if (unlikely(compound_head(page)->mapping != mapping)) { in pagecache_get_page()
1653 unlock_page(page); in pagecache_get_page()
1654 put_page(page); in pagecache_get_page()
1657 VM_BUG_ON_PAGE(page->index != offset, page); in pagecache_get_page()
1661 mark_page_accessed(page); in pagecache_get_page()
1664 if (!page && (fgp_flags & FGP_CREAT)) { in pagecache_get_page()
1671 page = __page_cache_alloc(gfp_mask); in pagecache_get_page()
1672 if (!page) in pagecache_get_page()
1680 __SetPageReferenced(page); in pagecache_get_page()
1682 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); in pagecache_get_page()
1684 put_page(page); in pagecache_get_page()
1685 page = NULL; in pagecache_get_page()
1694 if (page && (fgp_flags & FGP_FOR_MMAP)) in pagecache_get_page()
1695 unlock_page(page); in pagecache_get_page()
1698 return page; in pagecache_get_page()
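
pagecache_get_page() is the find-or-create front end: FGP_LOCK returns the page locked, FGP_CREAT allocates and inserts on a miss, FGP_ACCESSED feeds mark_page_accessed(). A sketch of the common locked lookup (get_locked_page() is hypothetical):

        #include <linux/pagemap.h>

        /* Sketch: find or create; the page comes back locked on success. */
        static struct page *get_locked_page(struct address_space *mapping,
                                            pgoff_t index)
        {
                return pagecache_get_page(mapping, index,
                                          FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                          mapping_gfp_mask(mapping));
        }
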
1726 struct page **entries, pgoff_t *indices) in find_get_entries()
1729 struct page *page; in find_get_entries() local
1736 xas_for_each(&xas, page, ULONG_MAX) { in find_get_entries()
1737 if (xas_retry(&xas, page)) in find_get_entries()
1744 if (xa_is_value(page)) in find_get_entries()
1747 if (!page_cache_get_speculative(page)) in find_get_entries()
1751 if (unlikely(page != xas_reload(&xas))) in find_get_entries()
1753 page = find_subpage(page, xas.xa_index); in find_get_entries()
1757 entries[ret] = page; in find_get_entries()
1762 put_page(page); in find_get_entries()
1793 struct page **pages) in find_get_pages_range()
1796 struct page *page; in find_get_pages_range() local
1803 xas_for_each(&xas, page, end) { in find_get_pages_range()
1804 if (xas_retry(&xas, page)) in find_get_pages_range()
1807 if (xa_is_value(page)) in find_get_pages_range()
1810 if (!page_cache_get_speculative(page)) in find_get_pages_range()
1814 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range()
1817 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
1824 put_page(page); in find_get_pages_range()
1858 unsigned int nr_pages, struct page **pages) in find_get_pages_contig()
1861 struct page *page; in find_get_pages_contig() local
1868 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
1869 if (xas_retry(&xas, page)) in find_get_pages_contig()
1875 if (xa_is_value(page)) in find_get_pages_contig()
1878 if (!page_cache_get_speculative(page)) in find_get_pages_contig()
1882 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
1885 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
1890 put_page(page); in find_get_pages_contig()
1915 struct page **pages) in find_get_pages_range_tag()
1918 struct page *page; in find_get_pages_range_tag() local
1925 xas_for_each_marked(&xas, page, end, tag) { in find_get_pages_range_tag()
1926 if (xas_retry(&xas, page)) in find_get_pages_range_tag()
1933 if (xa_is_value(page)) in find_get_pages_range_tag()
1936 if (!page_cache_get_speculative(page)) in find_get_pages_range_tag()
1940 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range_tag()
1943 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range_tag()
1950 put_page(page); in find_get_pages_range_tag()
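
The find_get_pages_* family feeds the pagevec iterators; each returned page carries a reference that the batch release drops. A hedged sketch of walking the dirty pages in a range (walk_dirty_pages() is hypothetical):

        #include <linux/pagemap.h>
        #include <linux/pagevec.h>
        #include <linux/sched.h>

        static void walk_dirty_pages(struct address_space *mapping,
                                     pgoff_t start, pgoff_t end)
        {
                struct pagevec pvec;
                unsigned int i;

                pagevec_init(&pvec);
                while (pagevec_lookup_range_tag(&pvec, mapping, &start, end,
                                                PAGECACHE_TAG_DIRTY)) {
                        for (i = 0; i < pagevec_count(&pvec); i++) {
                                struct page *page = pvec.pages[i];
                                /* each page holds a ref from the lookup */
                                (void)page;
                        }
                        pagevec_release(&pvec);
                        cond_resched();
                }
        }
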
2035 struct page *page; in generic_file_buffered_read() local
2047 page = find_get_page(mapping, index); in generic_file_buffered_read()
2048 if (!page) { in generic_file_buffered_read()
2054 page = find_get_page(mapping, index); in generic_file_buffered_read()
2055 if (unlikely(page == NULL)) in generic_file_buffered_read()
2058 if (PageReadahead(page)) { in generic_file_buffered_read()
2060 ra, filp, page, in generic_file_buffered_read()
2063 if (!PageUptodate(page)) { in generic_file_buffered_read()
2065 put_page(page); in generic_file_buffered_read()
2074 error = wait_on_page_locked_killable(page); in generic_file_buffered_read()
2077 if (PageUptodate(page)) in generic_file_buffered_read()
2086 if (!trylock_page(page)) in generic_file_buffered_read()
2089 if (!page->mapping) in generic_file_buffered_read()
2091 if (!mapping->a_ops->is_partially_uptodate(page, in generic_file_buffered_read()
2094 unlock_page(page); in generic_file_buffered_read()
2109 put_page(page); in generic_file_buffered_read()
2118 put_page(page); in generic_file_buffered_read()
2129 flush_dcache_page(page); in generic_file_buffered_read()
2136 mark_page_accessed(page); in generic_file_buffered_read()
2144 ret = copy_page_to_iter(page, offset, nr, iter); in generic_file_buffered_read()
2150 put_page(page); in generic_file_buffered_read()
2162 error = lock_page_killable(page); in generic_file_buffered_read()
2168 if (!page->mapping) { in generic_file_buffered_read()
2169 unlock_page(page); in generic_file_buffered_read()
2170 put_page(page); in generic_file_buffered_read()
2175 if (PageUptodate(page)) { in generic_file_buffered_read()
2176 unlock_page(page); in generic_file_buffered_read()
2186 ClearPageError(page); in generic_file_buffered_read()
2188 error = mapping->a_ops->readpage(filp, page); in generic_file_buffered_read()
2192 put_page(page); in generic_file_buffered_read()
2199 if (!PageUptodate(page)) { in generic_file_buffered_read()
2200 error = lock_page_killable(page); in generic_file_buffered_read()
2203 if (!PageUptodate(page)) { in generic_file_buffered_read()
2204 if (page->mapping == NULL) { in generic_file_buffered_read()
2208 unlock_page(page); in generic_file_buffered_read()
2209 put_page(page); in generic_file_buffered_read()
2212 unlock_page(page); in generic_file_buffered_read()
2217 unlock_page(page); in generic_file_buffered_read()
2224 put_page(page); in generic_file_buffered_read()
2232 page = page_cache_alloc(mapping); in generic_file_buffered_read()
2233 if (!page) { in generic_file_buffered_read()
2237 error = add_to_page_cache_lru(page, mapping, index, in generic_file_buffered_read()
2240 put_page(page); in generic_file_buffered_read()
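
generic_file_buffered_read() is the loop behind generic_file_read_iter(): look the page up, kick readahead, wait or lock and call ->readpage, then copy_page_to_iter(). Filesystems normally reach it through the generic entry point; a sketch of that wiring (all myfs_* names are hypothetical):

        #include <linux/fs.h>
        #include <linux/uio.h>

        static ssize_t myfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
        {
                /* handles O_DIRECT, then falls back to the buffered loop */
                return generic_file_read_iter(iocb, to);
        }

        static const struct file_operations myfs_file_ops = {
                .read_iter      = myfs_read_iter,
                .llseek         = generic_file_llseek,
        };
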
2364 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2367 if (trylock_page(page)) in lock_page_maybe_drop_mmap()
2380 if (__lock_page_killable(page)) { in lock_page_maybe_drop_mmap()
2392 __lock_page(page); in lock_page_maybe_drop_mmap()
2453 struct page *page) in do_async_mmap_readahead() argument
2466 if (PageReadahead(page)) { in do_async_mmap_readahead()
2469 page, offset, ra->ra_pages); in do_async_mmap_readahead()
2507 struct page *page; in filemap_fault() local
2517 page = find_get_page(mapping, offset); in filemap_fault()
2518 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { in filemap_fault()
2523 fpin = do_async_mmap_readahead(vmf, page); in filemap_fault()
2524 } else if (!page) { in filemap_fault()
2531 page = pagecache_get_page(mapping, offset, in filemap_fault()
2534 if (!page) { in filemap_fault()
2541 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) in filemap_fault()
2545 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
2546 unlock_page(page); in filemap_fault()
2547 put_page(page); in filemap_fault()
2550 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in filemap_fault()
2556 if (unlikely(!PageUptodate(page))) in filemap_fault()
2565 unlock_page(page); in filemap_fault()
2575 unlock_page(page); in filemap_fault()
2576 put_page(page); in filemap_fault()
2580 vmf->page = page; in filemap_fault()
2590 ClearPageError(page); in filemap_fault()
2592 error = mapping->a_ops->readpage(file, page); in filemap_fault()
2594 wait_on_page_locked(page); in filemap_fault()
2595 if (!PageUptodate(page)) in filemap_fault()
2600 put_page(page); in filemap_fault()
2615 if (page) in filemap_fault()
2616 put_page(page); in filemap_fault()
2631 struct page *page; in filemap_map_pages() local
2634 xas_for_each(&xas, page, end_pgoff) { in filemap_map_pages()
2635 if (xas_retry(&xas, page)) in filemap_map_pages()
2637 if (xa_is_value(page)) in filemap_map_pages()
2644 if (PageLocked(page)) in filemap_map_pages()
2646 if (!page_cache_get_speculative(page)) in filemap_map_pages()
2650 if (unlikely(page != xas_reload(&xas))) in filemap_map_pages()
2652 page = find_subpage(page, xas.xa_index); in filemap_map_pages()
2654 if (!PageUptodate(page) || in filemap_map_pages()
2655 PageReadahead(page) || in filemap_map_pages()
2656 PageHWPoison(page)) in filemap_map_pages()
2658 if (!trylock_page(page)) in filemap_map_pages()
2661 if (page->mapping != mapping || !PageUptodate(page)) in filemap_map_pages()
2665 if (page->index >= max_idx) in filemap_map_pages()
2675 if (alloc_set_pte(vmf, NULL, page)) in filemap_map_pages()
2677 unlock_page(page); in filemap_map_pages()
2680 unlock_page(page); in filemap_map_pages()
2682 put_page(page); in filemap_map_pages()
2694 struct page *page = vmf->page; in filemap_page_mkwrite() local
2700 lock_page(page); in filemap_page_mkwrite()
2701 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
2702 unlock_page(page); in filemap_page_mkwrite()
2711 set_page_dirty(page); in filemap_page_mkwrite()
2712 wait_for_stable_page(page); in filemap_page_mkwrite()
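
filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() are the generic fault trio most filesystems install verbatim. A sketch of the usual mmap wiring, modeled on generic_file_mmap() (myfs_* names are hypothetical):

        #include <linux/fs.h>
        #include <linux/mm.h>

        static const struct vm_operations_struct myfs_vm_ops = {
                .fault          = filemap_fault,
                .map_pages      = filemap_map_pages,
                .page_mkwrite   = filemap_page_mkwrite,
        };

        static int myfs_mmap(struct file *file, struct vm_area_struct *vma)
        {
                file_accessed(file);
                vma->vm_ops = &myfs_vm_ops;
                return 0;
        }
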
2765 static struct page *wait_on_page_read(struct page *page) in wait_on_page_read() argument
2767 if (!IS_ERR(page)) { in wait_on_page_read()
2768 wait_on_page_locked(page); in wait_on_page_read()
2769 if (!PageUptodate(page)) { in wait_on_page_read()
2770 put_page(page); in wait_on_page_read()
2771 page = ERR_PTR(-EIO); in wait_on_page_read()
2774 return page; in wait_on_page_read()
2777 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page()
2779 int (*filler)(void *, struct page *), in do_read_cache_page() argument
2783 struct page *page; in do_read_cache_page() local
2786 page = find_get_page(mapping, index); in do_read_cache_page()
2787 if (!page) { in do_read_cache_page()
2788 page = __page_cache_alloc(gfp); in do_read_cache_page()
2789 if (!page) in do_read_cache_page()
2791 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
2793 put_page(page); in do_read_cache_page()
2802 err = filler(data, page); in do_read_cache_page()
2804 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
2807 put_page(page); in do_read_cache_page()
2811 page = wait_on_page_read(page); in do_read_cache_page()
2812 if (IS_ERR(page)) in do_read_cache_page()
2813 return page; in do_read_cache_page()
2816 if (PageUptodate(page)) in do_read_cache_page()
2850 wait_on_page_locked(page); in do_read_cache_page()
2851 if (PageUptodate(page)) in do_read_cache_page()
2855 lock_page(page); in do_read_cache_page()
2858 if (!page->mapping) { in do_read_cache_page()
2859 unlock_page(page); in do_read_cache_page()
2860 put_page(page); in do_read_cache_page()
2865 if (PageUptodate(page)) { in do_read_cache_page()
2866 unlock_page(page); in do_read_cache_page()
2872 mark_page_accessed(page); in do_read_cache_page()
2873 return page; in do_read_cache_page()
2890 struct page *read_cache_page(struct address_space *mapping, in read_cache_page()
2892 int (*filler)(void *, struct page *), in read_cache_page() argument
2913 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp()
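
do_read_cache_page() finds or allocates the page, runs the filler (or ->readpage when the filler is NULL), and loops until the page is uptodate; read_cache_page() and read_cache_page_gfp() are thin wrappers over it. A sketch of the call-and-check pattern (read_one_page() is hypothetical; passing NULL data assumes the ->readpage in question ignores its file argument):

        #include <linux/err.h>
        #include <linux/pagemap.h>

        static struct page *read_one_page(struct address_space *mapping,
                                          pgoff_t index)
        {
                struct page *page = read_cache_page(mapping, index, NULL, NULL);

                if (IS_ERR(page))
                        return page;    /* typically ERR_PTR(-EIO) */
                /* uptodate, reference held: put_page() when finished */
                return page;
        }
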
3145 struct page **pagep, void **fsdata) in pagecache_write_begin()
3156 struct page *page, void *fsdata) in pagecache_write_end() argument
3160 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3246 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin()
3249 struct page *page; in grab_cache_page_write_begin() local
3255 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3257 if (page) in grab_cache_page_write_begin()
3258 wait_for_stable_page(page); in grab_cache_page_write_begin()
3260 return page; in grab_cache_page_write_begin()
3274 struct page *page; in generic_perform_write() local
3306 &page, &fsdata); in generic_perform_write()
3311 flush_dcache_page(page); in generic_perform_write()
3313 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); in generic_perform_write()
3314 flush_dcache_page(page); in generic_perform_write()
3317 page, fsdata); in generic_perform_write()
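
generic_perform_write() drives the write_begin/copy/write_end cycle above: the aop hands back a locked, stable page, iov_iter_copy_from_user_atomic() fills it, and ->write_end commits the bytes. A deliberately minimal ->write_begin sketch on top of grab_cache_page_write_begin() (myfs_write_begin() is hypothetical and skips the read-for-partial-write a real one needs):

        #include <linux/pagemap.h>

        static int myfs_write_begin(struct file *file,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned int len,
                                    unsigned int flags,
                                    struct page **pagep, void **fsdata)
        {
                struct page *page;

                page = grab_cache_page_write_begin(mapping,
                                                   pos >> PAGE_SHIFT, flags);
                if (!page)
                        return -ENOMEM;
                *pagep = page;  /* locked and stable, per the helper above */
                return 0;
        }
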
3492 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
3494 struct address_space * const mapping = page->mapping; in try_to_release_page()
3496 BUG_ON(!PageLocked(page)); in try_to_release_page()
3497 if (PageWriteback(page)) in try_to_release_page()
3501 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
3502 return try_to_free_buffers(page); in try_to_release_page()
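
try_to_release_page() is the final gate before a clean page with private data can be freed: writeback vetoes it, ->releasepage gets first refusal, and try_to_free_buffers() is the buffer_head fallback. A sketch of a caller probing a locked page (can_drop_private() is hypothetical):

        #include <linux/pagemap.h>

        static bool can_drop_private(struct page *page)
        {
                if (!page_has_private(page))
                        return true;
                /* GFP_KERNEL: we may block; pass 0 from atomic context */
                return try_to_release_page(page, GFP_KERNEL) != 0;
        }
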