Lines matching the whole word "mapping" in mm/filemap.c. Each entry gives the source line number, the matched line, and the enclosing function.
124 static void page_cache_delete(struct address_space *mapping, in page_cache_delete() argument
127 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
130 mapping_set_update(&xas, mapping); in page_cache_delete()
145 page->mapping = NULL; in page_cache_delete()
147 mapping->nrpages -= nr; in page_cache_delete()
150 static void unaccount_page_cache_page(struct address_space *mapping, in unaccount_page_cache_page() argument
163 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
177 if (mapping_exiting(mapping) && in unaccount_page_cache_page()
203 filemap_nr_thps_dec(mapping); in unaccount_page_cache_page()
217 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
227 struct address_space *mapping = page->mapping; in __delete_from_page_cache() local
231 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
232 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
235 static void page_cache_free_page(struct address_space *mapping, in page_cache_free_page() argument
240 freepage = mapping->a_ops->freepage; in page_cache_free_page()
262 struct address_space *mapping = page_mapping(page); in delete_from_page_cache() local
265 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache()
267 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache()
269 page_cache_free_page(mapping, page); in delete_from_page_cache()
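The four helpers above form the single-page removal path: take the xarray lock, unaccount, erase, then free. A hedged reconstruction of how delete_from_page_cache() composes them (modeled on mm/filemap.c of this era; details may vary by release):

    void delete_from_page_cache(struct page *page)
    {
            struct address_space *mapping = page_mapping(page);

            BUG_ON(!PageLocked(page));
            xa_lock_irq(&mapping->i_pages);
            /* unaccount_page_cache_page() + page_cache_delete() */
            __delete_from_page_cache(page, NULL);
            xa_unlock_irq(&mapping->i_pages);

            /* calls mapping->a_ops->freepage if set, then drops the
             * page cache reference */
            page_cache_free_page(mapping, page);
    }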
275 * @mapping: the mapping to which pages belong
278 * The function walks over mapping->i_pages and removes pages passed in @pvec
279 * from the mapping. The function expects @pvec to be sorted by page index
281 * It tolerates holes in @pvec (mapping entries at those indices are not
287 static void page_cache_delete_batch(struct address_space *mapping, in page_cache_delete_batch() argument
290 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
295 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
319 page->mapping = NULL; in page_cache_delete_batch()
332 mapping->nrpages -= total_pages; in page_cache_delete_batch()
335 void delete_from_page_cache_batch(struct address_space *mapping, in delete_from_page_cache_batch() argument
343 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
347 unaccount_page_cache_page(mapping, pvec->pages[i]); in delete_from_page_cache_batch()
349 page_cache_delete_batch(mapping, pvec); in delete_from_page_cache_batch()
350 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
353 page_cache_free_page(mapping, pvec->pages[i]); in delete_from_page_cache_batch()
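The batch variant keeps the same unaccount/erase/free order but amortizes one xa_lock_irq over a whole pagevec. Hedged reconstruction (tracepoint omitted):

    void delete_from_page_cache_batch(struct address_space *mapping,
                                      struct pagevec *pvec)
    {
            int i;

            if (!pagevec_count(pvec))
                    return;

            xa_lock_irq(&mapping->i_pages);
            for (i = 0; i < pagevec_count(pvec); i++)
                    unaccount_page_cache_page(mapping, pvec->pages[i]);
            page_cache_delete_batch(mapping, pvec);
            xa_unlock_irq(&mapping->i_pages);

            for (i = 0; i < pagevec_count(pvec); i++)
                    page_cache_free_page(mapping, pvec->pages[i]);
    }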
356 int filemap_check_errors(struct address_space *mapping) in filemap_check_errors() argument
360 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
361 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
363 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
364 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
370 static int filemap_check_and_keep_errors(struct address_space *mapping) in filemap_check_and_keep_errors() argument
373 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
375 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
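Both checkers inspect the same AS_ENOSPC/AS_EIO flag bits; filemap_check_errors() consumes them with test_and_clear_bit() while the _keep_errors variant only reports them. Reconstructed for reference:

    int filemap_check_errors(struct address_space *mapping)
    {
            int ret = 0;

            /* plain test_bit() first avoids a locked RMW when no error
             * is pending; AS_EIO is checked last so it wins if both
             * bits are set */
            if (test_bit(AS_ENOSPC, &mapping->flags) &&
                test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                    ret = -ENOSPC;
            if (test_bit(AS_EIO, &mapping->flags) &&
                test_and_clear_bit(AS_EIO, &mapping->flags))
                    ret = -EIO;
            return ret;
    }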
381 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
382 * @mapping: address space structure to write
385 * Call writepages on the mapping using the provided wbc to control the
390 int filemap_fdatawrite_wbc(struct address_space *mapping, in filemap_fdatawrite_wbc() argument
395 if (!mapping_can_writeback(mapping) || in filemap_fdatawrite_wbc()
396 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in filemap_fdatawrite_wbc()
399 wbc_attach_fdatawrite_inode(wbc, mapping->host); in filemap_fdatawrite_wbc()
400 ret = do_writepages(mapping, wbc); in filemap_fdatawrite_wbc()
407 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
408 * @mapping: address space structure to write
413 * Start writeback against all of a mapping's dirty pages that lie
423 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in __filemap_fdatawrite_range() argument
433 return filemap_fdatawrite_wbc(mapping, &wbc); in __filemap_fdatawrite_range()
436 static inline int __filemap_fdatawrite(struct address_space *mapping, in __filemap_fdatawrite() argument
439 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); in __filemap_fdatawrite()
442 int filemap_fdatawrite(struct address_space *mapping) in filemap_fdatawrite() argument
444 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); in filemap_fdatawrite()
448 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range() argument
451 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); in filemap_fdatawrite_range()
457 * @mapping: target address_space
464 int filemap_flush(struct address_space *mapping) in filemap_flush() argument
466 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); in filemap_flush()
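Every fdatawrite entry point above funnels into one writeback_control; only sync_mode (WB_SYNC_ALL for filemap_fdatawrite(), WB_SYNC_NONE for filemap_flush()) and the byte range differ. Hedged reconstruction of the funnel:

    int __filemap_fdatawrite_range(struct address_space *mapping,
                                   loff_t start, loff_t end, int sync_mode)
    {
            struct writeback_control wbc = {
                    .sync_mode      = sync_mode,
                    .nr_to_write    = LONG_MAX,
                    .range_start    = start,
                    .range_end      = end,
            };

            return filemap_fdatawrite_wbc(mapping, &wbc);
    }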
472 * @mapping: address space within which to check
482 bool filemap_range_has_page(struct address_space *mapping, in filemap_range_has_page() argument
486 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
513 static void __filemap_fdatawait_range(struct address_space *mapping, in __filemap_fdatawait_range() argument
528 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, in __filemap_fdatawait_range()
546 * @mapping: address space structure to wait for
560 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, in filemap_fdatawait_range() argument
563 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range()
564 return filemap_check_errors(mapping); in filemap_fdatawait_range()
570 * @mapping: address space structure to wait for
582 int filemap_fdatawait_range_keep_errors(struct address_space *mapping, in filemap_fdatawait_range_keep_errors() argument
585 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range_keep_errors()
586 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_range_keep_errors()
608 struct address_space *mapping = file->f_mapping; in file_fdatawait_range() local
610 __filemap_fdatawait_range(mapping, start_byte, end_byte); in file_fdatawait_range()
617 * @mapping: address space structure to wait for
629 int filemap_fdatawait_keep_errors(struct address_space *mapping) in filemap_fdatawait_keep_errors() argument
631 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait_keep_errors()
632 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_keep_errors()
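The wait side is a tagged lookup loop: fetch batches of PAGECACHE_TAG_WRITEBACK pages and wait on each, leaving error reporting to the callers above. Hedged reconstruction:

    static void __filemap_fdatawait_range(struct address_space *mapping,
                                          loff_t start_byte, loff_t end_byte)
    {
            pgoff_t index = start_byte >> PAGE_SHIFT;
            pgoff_t end = end_byte >> PAGE_SHIFT;
            struct pagevec pvec;
            unsigned nr_pages;

            if (end_byte < start_byte)
                    return;

            pagevec_init(&pvec);
            while (index <= end) {
                    unsigned i;

                    nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
                                    &index, end, PAGECACHE_TAG_WRITEBACK);
                    if (!nr_pages)
                            break;

                    for (i = 0; i < nr_pages; i++) {
                            wait_on_page_writeback(pvec.pages[i]);
                            ClearPageError(pvec.pages[i]);
                    }
                    pagevec_release(&pvec);
                    cond_resched();
            }
    }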
637 static bool mapping_needs_writeback(struct address_space *mapping) in mapping_needs_writeback() argument
639 return mapping->nrpages; in mapping_needs_writeback()
644 * @mapping: address space within which to check
656 bool filemap_range_needs_writeback(struct address_space *mapping, in filemap_range_needs_writeback() argument
659 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_needs_writeback()
663 if (!mapping_needs_writeback(mapping)) in filemap_range_needs_writeback()
665 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && in filemap_range_needs_writeback()
666 !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) in filemap_range_needs_writeback()
687 * @mapping: the address_space for the pages
698 int filemap_write_and_wait_range(struct address_space *mapping, in filemap_write_and_wait_range() argument
703 if (mapping_needs_writeback(mapping)) { in filemap_write_and_wait_range()
704 err = __filemap_fdatawrite_range(mapping, lstart, lend, in filemap_write_and_wait_range()
713 int err2 = filemap_fdatawait_range(mapping, in filemap_write_and_wait_range()
719 filemap_check_errors(mapping); in filemap_write_and_wait_range()
722 err = filemap_check_errors(mapping); in filemap_write_and_wait_range()
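filemap_write_and_wait_range() is the standard fsync building block: flush, wait, then report any accumulated error exactly once. A hedged usage sketch; my_fsync() and the metadata step are hypothetical, the helper is real:

    static int my_fsync(struct file *file, loff_t start, loff_t end,
                        int datasync)
    {
            int err;

            err = filemap_write_and_wait_range(file->f_mapping, start, end);
            if (err)
                    return err;

            /* ... filesystem-specific metadata flush would go here ... */
            return 0;
    }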
728 void __filemap_set_wb_err(struct address_space *mapping, int err) in __filemap_set_wb_err() argument
730 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
732 trace_filemap_set_wb_err(mapping, eseq); in __filemap_set_wb_err()
745 * Grab the wb_err from the mapping. If it matches what we have in the file,
748 * If it doesn't match, then take the mapping value, set the "seen" flag in
754 * While we handle mapping->wb_err with atomic operations, the f_wb_err
764 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err() local
767 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
771 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
782 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
783 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
807 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range() local
809 if (mapping_needs_writeback(mapping)) { in file_write_and_wait_range()
810 err = __filemap_fdatawrite_range(mapping, lstart, lend, in file_write_and_wait_range()
814 __filemap_fdatawait_range(mapping, lstart, lend); in file_write_and_wait_range()
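file_write_and_wait_range() differs from the mapping-level variant by reporting errors through the file's errseq_t cursor (f_wb_err), so each open file observes a writeback error once instead of racing to clear the AS_* bits. Hedged reconstruction:

    int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
    {
            int err = 0, err2;
            struct address_space *mapping = file->f_mapping;

            if (mapping_needs_writeback(mapping)) {
                    err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                     WB_SYNC_ALL);
                    /* don't wait after a fatal write error */
                    if (err != -EIO)
                            __filemap_fdatawait_range(mapping, lstart, lend);
            }
            err2 = file_check_and_advance_wb_err(file);
            if (!err)
                    err = err2;
            return err;
    }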
838 struct address_space *mapping = old->mapping; in replace_page_cache_page() local
839 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
841 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
845 VM_BUG_ON_PAGE(new->mapping, new); in replace_page_cache_page()
848 new->mapping = mapping; in replace_page_cache_page()
856 old->mapping = NULL; in replace_page_cache_page()
874 struct address_space *mapping, in __add_to_page_cache_locked() argument
878 XA_STATE(xas, &mapping->i_pages, offset); in __add_to_page_cache_locked()
885 mapping_set_update(&xas, mapping); in __add_to_page_cache_locked()
888 page->mapping = mapping; in __add_to_page_cache_locked()
931 mapping->nrpages++; in __add_to_page_cache_locked()
950 page->mapping = NULL; in __add_to_page_cache_locked()
960 * @mapping: the page's address_space
969 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
972 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
977 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
984 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
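add_to_page_cache_lru() is the insertion path for freshly allocated pages: it locks the page, charges it, and fails with -EEXIST if another thread won the race. Hedged sketch of the usual allocate/insert/read sequence; read_one_page() is hypothetical:

    static struct page *read_one_page(struct file *file,
                                      struct address_space *mapping,
                                      pgoff_t index)
    {
            struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
            int err;

            if (!page)
                    return ERR_PTR(-ENOMEM);

            err = add_to_page_cache_lru(page, mapping, index,
                            mapping_gfp_constraint(mapping, GFP_KERNEL));
            if (err) {
                    put_page(page);
                    return ERR_PTR(err);    /* -EEXIST: lost the race */
            }

            err = mapping->a_ops->readpage(file, page); /* unlocks page */
            if (err) {
                    put_page(page);
                    return ERR_PTR(err);
            }
            return page;    /* caller should still check PageUptodate() */
    }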
1030 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
1032 * @mapping1: the first mapping to lock
1033 * @mapping2: the second mapping to lock
1050 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1052 * @mapping1: the first mapping to unlock
1053 * @mapping2: the second mapping to unlock
1628 struct address_space *mapping; in page_endio() local
1631 mapping = page_mapping(page); in page_endio()
1632 if (mapping) in page_endio()
1633 mapping_set_error(mapping, err); in page_endio()
1734 * @mapping: Mapping.
1751 pgoff_t page_cache_next_miss(struct address_space *mapping, in page_cache_next_miss() argument
1754 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1770 * @mapping: Mapping.
1787 pgoff_t page_cache_prev_miss(struct address_space *mapping, in page_cache_prev_miss() argument
1790 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
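The two miss helpers answer "where does the contiguous cached run end?"; ondemand readahead, for example, uses them to size its window. Usage snippet, with mapping, index and max_pages assumed in scope:

    /* first index with no page present, scanning at most max_pages
     * slots beyond the current page */
    pgoff_t start = page_cache_next_miss(mapping, index + 1, max_pages);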
1806 * @mapping: the address_space to search
1809 * Looks up the page cache slot at @mapping & @index. If there is a
1817 static struct page *mapping_get_entry(struct address_space *mapping, in mapping_get_entry() argument
1820 XA_STATE(xas, &mapping->i_pages, index); in mapping_get_entry()
1856 * @mapping: The address_space to search.
1861 * Looks up the page cache entry at @mapping & @index.
1888 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, in pagecache_get_page() argument
1894 page = mapping_get_entry(mapping, index); in pagecache_get_page()
1914 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1935 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) in pagecache_get_page()
1951 err = add_to_page_cache_lru(page, mapping, index, gfp_mask); in pagecache_get_page()
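pagecache_get_page() multiplexes its behavior through FGP flags. A typical find-or-create of a locked page; touch_page() is hypothetical, the flags and helpers are real:

    static int touch_page(struct address_space *mapping, pgoff_t index)
    {
            struct page *page;

            page = pagecache_get_page(mapping, index,
                                      FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                      mapping_gfp_mask(mapping));
            if (!page)
                    return -ENOMEM;

            /* page is locked, referenced and in the cache here */
            unlock_page(page);
            put_page(page);
            return 0;
    }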
2009 * @mapping: The address_space to search
2016 * the mapping. The entries are placed in @pvec. find_get_entries()
2019 * The search returns a group of mapping-contiguous page cache entries
2033 unsigned find_get_entries(struct address_space *mapping, pgoff_t start, in find_get_entries() argument
2036 XA_STATE(xas, &mapping->i_pages, start); in find_get_entries()
2066 * @mapping: The address_space to search.
2072 * find_lock_entries() will return a batch of entries from @mapping.
2085 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, in find_lock_entries() argument
2088 XA_STATE(xas, &mapping->i_pages, start); in find_lock_entries()
2101 if (page->mapping != mapping || PageWriteback(page)) in find_lock_entries()
2131 * @mapping: The address_space to search
2138 * pages in the mapping starting at index @start and up to index @end
2142 * The search returns a group of mapping-contiguous pages with ascending
2150 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, in find_get_pages_range() argument
2154 XA_STATE(xas, &mapping->i_pages, *start); in find_get_pages_range()
2192 * @mapping: The address_space to search
2202 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, in find_get_pages_contig() argument
2205 XA_STATE(xas, &mapping->i_pages, index); in find_get_pages_contig()
2246 * @mapping: the address_space to search
2259 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, in find_get_pages_range_tag() argument
2263 XA_STATE(xas, &mapping->i_pages, *index); in find_get_pages_range_tag()
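The find_get_pages_* family returns referenced pages in batches and advances the caller's index, so the canonical consumer is a loop. Hedged sketch with mapping, start and end assumed in scope; process_page() is hypothetical:

    struct page *pages[PAGEVEC_SIZE];
    pgoff_t index = start;
    unsigned i, nr;

    while ((nr = find_get_pages_range(mapping, &index, end,
                                      PAGEVEC_SIZE, pages)) != 0) {
            for (i = 0; i < nr; i++) {
                    process_page(pages[i]); /* hypothetical */
                    put_page(pages[i]);     /* drop lookup reference */
            }
    }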
2333 static void filemap_get_read_batch(struct address_space *mapping, in filemap_get_read_batch() argument
2336 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2369 static int filemap_read_page(struct file *file, struct address_space *mapping, in filemap_read_page() argument
2381 error = mapping->a_ops->readpage(file, page); in filemap_read_page()
2394 static bool filemap_range_uptodate(struct address_space *mapping, in filemap_range_uptodate() argument
2404 if (!mapping->a_ops->is_partially_uptodate) in filemap_range_uptodate()
2406 if (mapping->host->i_blkbits >= (PAGE_SHIFT + thp_order(page))) in filemap_range_uptodate()
2417 return mapping->a_ops->is_partially_uptodate(page, pos, count); in filemap_range_uptodate()
2421 struct address_space *mapping, struct iov_iter *iter, in filemap_update_page() argument
2427 if (!filemap_invalidate_trylock_shared(mapping)) in filemap_update_page()
2430 filemap_invalidate_lock_shared(mapping); in filemap_update_page()
2438 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2448 if (!page->mapping) in filemap_update_page()
2452 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page)) in filemap_update_page()
2459 error = filemap_read_page(iocb->ki_filp, mapping, page); in filemap_update_page()
2464 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2471 struct address_space *mapping, pgoff_t index, in filemap_create_page() argument
2477 page = page_cache_alloc(mapping); in filemap_create_page()
2489 * ->readpages() that need to hold invalidate_lock while mapping blocks in filemap_create_page()
2493 filemap_invalidate_lock_shared(mapping); in filemap_create_page()
2494 error = add_to_page_cache_lru(page, mapping, index, in filemap_create_page()
2495 mapping_gfp_constraint(mapping, GFP_KERNEL)); in filemap_create_page()
2501 error = filemap_read_page(file, mapping, page); in filemap_create_page()
2505 filemap_invalidate_unlock_shared(mapping); in filemap_create_page()
2509 filemap_invalidate_unlock_shared(mapping); in filemap_create_page()
2515 struct address_space *mapping, struct page *page, in filemap_readahead() argument
2520 page_cache_async_readahead(mapping, &file->f_ra, file, page, in filemap_readahead()
2529 struct address_space *mapping = filp->f_mapping; in filemap_get_pages() local
2541 filemap_get_read_batch(mapping, index, last_index, pvec); in filemap_get_pages()
2545 page_cache_sync_readahead(mapping, ra, filp, index, in filemap_get_pages()
2547 filemap_get_read_batch(mapping, index, last_index, pvec); in filemap_get_pages()
2552 err = filemap_create_page(filp, mapping, in filemap_get_pages()
2561 err = filemap_readahead(iocb, filp, mapping, page, last_index); in filemap_get_pages()
2568 err = filemap_update_page(iocb, mapping, iter, page); in filemap_get_pages()
2602 struct address_space *mapping = filp->f_mapping; in filemap_read() local
2603 struct inode *inode = mapping->host; in filemap_read()
2649 writably_mapped = mapping_writably_mapped(mapping); in filemap_read()
2738 struct address_space *mapping = file->f_mapping; in generic_file_read_iter() local
2739 struct inode *inode = mapping->host; in generic_file_read_iter()
2744 if (filemap_range_needs_writeback(mapping, iocb->ki_pos, in generic_file_read_iter()
2748 retval = filemap_write_and_wait_range(mapping, in generic_file_read_iter()
2757 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
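generic_file_read_iter() already covers both buffered reads (via filemap_read()) and O_DIRECT (flushing or checking the page cache first, as the lines above show), so a page-cache filesystem typically just wires the generic helpers into its file_operations. Hedged sketch; the myfs_* name is hypothetical, the helpers are real:

    const struct file_operations myfs_file_operations = {
            .llseek         = generic_file_llseek,
            .read_iter      = generic_file_read_iter,
            .write_iter     = generic_file_write_iter,
            .mmap           = generic_file_mmap,
            .fsync          = generic_file_fsync,
    };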
2784 struct address_space *mapping, struct page *page, in page_seek_hole_data() argument
2787 const struct address_space_operations *ops = mapping->a_ops; in page_seek_hole_data()
2788 size_t offset, bsz = i_blocksize(mapping->host); in page_seek_hole_data()
2798 if (unlikely(page->mapping != mapping)) in page_seek_hole_data()
2825 * @mapping: Address space to search.
2841 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, in mapping_seek_hole_data() argument
2844 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
2865 start = page_seek_hole_data(&xas, mapping, page, start, pos, in mapping_seek_hole_data()
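mapping_seek_hole_data() implements SEEK_HOLE/SEEK_DATA against the page cache, which matters for data that is dirty in memory but not yet on disk. Hedged sketch of a hypothetical llseek handler:

    static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
    {
            struct inode *inode = file_inode(file);

            switch (whence) {
            case SEEK_HOLE:
            case SEEK_DATA:
                    return mapping_seek_hole_data(file->f_mapping, offset,
                                                  i_size_read(inode), whence);
            default:
                    return generic_file_llseek(file, offset, whence);
            }
    }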
2944 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead() local
2945 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
2995 struct address_space *mapping = file->f_mapping; in do_async_mmap_readahead() local
3008 page_cache_async_readahead(mapping, ra, file, in do_async_mmap_readahead()
3042 struct address_space *mapping = file->f_mapping; in filemap_fault() local
3043 struct inode *inode = mapping->host; in filemap_fault()
3057 page = find_get_page(mapping, offset); in filemap_fault()
3066 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3081 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3084 page = pagecache_get_page(mapping, offset, in filemap_fault()
3090 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3099 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
3135 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3159 error = filemap_read_page(file, mapping, page); in filemap_fault()
3166 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3179 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3227 struct address_space *mapping, in next_uptodate_page() argument
3252 if (page->mapping != mapping) in next_uptodate_page()
3256 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in next_uptodate_page()
3269 static inline struct page *first_map_page(struct address_space *mapping, in first_map_page() argument
3274 mapping, xas, end_pgoff); in first_map_page()
3277 static inline struct page *next_map_page(struct address_space *mapping, in next_map_page() argument
3282 mapping, xas, end_pgoff); in next_map_page()
3290 struct address_space *mapping = file->f_mapping; in filemap_map_pages() local
3293 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3299 head = first_map_page(mapping, &xas, end_pgoff); in filemap_map_pages()
3337 } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL); in filemap_map_pages()
3348 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite() local
3352 sb_start_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3355 if (page->mapping != mapping) { in filemap_page_mkwrite()
3368 sb_end_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3382 struct address_space *mapping = file->f_mapping; in generic_file_mmap() local
3384 if (!mapping->a_ops->readpage) in generic_file_mmap()
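generic_file_mmap() refuses mappings unless ->readpage exists, then installs the generic vm_ops, whose hooks are exactly the three filemap_* fault handlers above. Mirroring the kernel's own generic_file_vm_ops table:

    static const struct vm_operations_struct myfs_file_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };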
3431 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page() argument
3440 page = find_get_page(mapping, index); in do_read_cache_page()
3445 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
3458 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
3489 * otherwise serialising on page lock to stabilise the mapping gives in do_read_cache_page()
3493 * will be a race with remove_mapping that determines if the mapping in do_read_cache_page()
3512 if (!page->mapping) { in do_read_cache_page()
3540 * @mapping: the page's address_space
3550 * The function expects mapping->invalidate_lock to be already held.
3554 struct page *read_cache_page(struct address_space *mapping, in read_cache_page() argument
3559 return do_read_cache_page(mapping, index, filler, data, in read_cache_page()
3560 mapping_gfp_mask(mapping)); in read_cache_page()
3566 * @mapping: the page's address_space
3570 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3575 * The function expects mapping->invalidate_lock to be already held.
3579 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp() argument
3583 return do_read_cache_page(mapping, index, NULL, NULL, gfp); in read_cache_page_gfp()
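Both read_cache_page() variants return an uptodate, referenced (but unlocked) page or an ERR_PTR, never NULL. Typical call, inside a function returning int:

    struct page *page = read_cache_page_gfp(mapping, index, GFP_KERNEL);

    if (IS_ERR(page))
            return PTR_ERR(page);
    /* page contents are valid here; drop the reference when done */
    put_page(page);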
3587 int pagecache_write_begin(struct file *file, struct address_space *mapping, in pagecache_write_begin() argument
3591 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_begin()
3593 return aops->write_begin(file, mapping, pos, len, flags, in pagecache_write_begin()
3598 int pagecache_write_end(struct file *file, struct address_space *mapping, in pagecache_write_end() argument
3602 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_end()
3604 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3632 struct address_space *mapping = file->f_mapping; in generic_file_direct_write() local
3633 struct inode *inode = mapping->host; in generic_file_direct_write()
3648 written = filemap_write_and_wait_range(mapping, pos, in generic_file_direct_write()
3660 written = invalidate_inode_pages2_range(mapping, in generic_file_direct_write()
3672 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
3689 * Skip invalidation for async writes or if mapping has no pages. in generic_file_direct_write()
3691 if (written > 0 && mapping->nrpages && in generic_file_direct_write()
3692 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end)) in generic_file_direct_write()
3715 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin() argument
3724 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3725 mapping_gfp_mask(mapping)); in grab_cache_page_write_begin()
3736 struct address_space *mapping = file->f_mapping; in generic_perform_write() local
3737 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
3770 status = a_ops->write_begin(file, mapping, pos, bytes, flags, in generic_perform_write()
3775 if (mapping_writably_mapped(mapping)) in generic_perform_write()
3781 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
3804 balance_dirty_pages_ratelimited(mapping); in generic_perform_write()
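generic_perform_write() is the buffered-write engine behind most ->write_iter implementations: each iteration brackets a user copy between the ->write_begin/->write_end hooks shown above and throttles dirtying. A condensed, hedged reconstruction (short-copy retry and fault-in handling omitted); file, mapping, a_ops, i (the iov_iter) and pos are in scope as in the real function:

    ssize_t written = 0;
    int status = 0;

    while (iov_iter_count(i)) {
            struct page *page;
            void *fsdata;
            unsigned long offset = pos & (PAGE_SIZE - 1);
            size_t bytes = min_t(size_t, PAGE_SIZE - offset,
                                 iov_iter_count(i));
            size_t copied;

            status = a_ops->write_begin(file, mapping, pos, bytes, 0,
                                        &page, &fsdata);
            if (status)
                    break;

            if (mapping_writably_mapped(mapping))
                    flush_dcache_page(page);

            copied = copy_page_from_iter_atomic(page, offset, bytes, i);
            flush_dcache_page(page);

            status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                      page, fsdata);
            if (status < 0)
                    break;
            pos += status;
            written += status;

            balance_dirty_pages_ratelimited(mapping);
            cond_resched();
    }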
3835 struct address_space *mapping = file->f_mapping; in __generic_file_write_iter() local
3836 struct inode *inode = mapping->host; in __generic_file_write_iter()
3883 err = filemap_write_and_wait_range(mapping, pos, endbyte); in __generic_file_write_iter()
3887 invalidate_mapping_pages(mapping, in __generic_file_write_iter()
3957 struct address_space * const mapping = page->mapping; in try_to_release_page() local
3963 if (mapping && mapping->a_ops->releasepage) in try_to_release_page()
3964 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()