Lines matching the identifier "mapping" (full matches) in the Linux kernel's mm/filemap.c

124 static void page_cache_delete(struct address_space *mapping,  in page_cache_delete()  argument
127 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
130 mapping_set_update(&xas, mapping); in page_cache_delete()
143 folio->mapping = NULL; in page_cache_delete()
145 mapping->nrpages -= nr; in page_cache_delete()
148 static void filemap_unaccount_folio(struct address_space *mapping, in filemap_unaccount_folio() argument
161 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
190 filemap_nr_thps_dec(mapping); in filemap_unaccount_folio()
208 mapping_can_writeback(mapping))) in filemap_unaccount_folio()
209 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
219 struct address_space *mapping = folio->mapping; in __filemap_remove_folio() local
222 filemap_unaccount_folio(mapping, folio); in __filemap_remove_folio()
223 page_cache_delete(mapping, folio, shadow); in __filemap_remove_folio()
226 void filemap_free_folio(struct address_space *mapping, struct folio *folio) in filemap_free_folio() argument
231 free_folio = mapping->a_ops->free_folio; in filemap_free_folio()
250 struct address_space *mapping = folio->mapping; in filemap_remove_folio() local
253 spin_lock(&mapping->host->i_lock); in filemap_remove_folio()
254 xa_lock_irq(&mapping->i_pages); in filemap_remove_folio()
256 xa_unlock_irq(&mapping->i_pages); in filemap_remove_folio()
257 if (mapping_shrinkable(mapping)) in filemap_remove_folio()
258 inode_add_lru(mapping->host); in filemap_remove_folio()
259 spin_unlock(&mapping->host->i_lock); in filemap_remove_folio()
261 filemap_free_folio(mapping, folio); in filemap_remove_folio()
266 * @mapping: the mapping to which folios belong
269 * The function walks over mapping->i_pages and removes folios passed in
270 * @fbatch from the mapping. The function expects @fbatch to be sorted
272 * It tolerates holes in @fbatch (mapping entries at those indices are not
277 static void page_cache_delete_batch(struct address_space *mapping, in page_cache_delete_batch() argument
280 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
285 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
308 folio->mapping = NULL; in page_cache_delete_batch()
315 mapping->nrpages -= total_pages; in page_cache_delete_batch()
318 void delete_from_page_cache_batch(struct address_space *mapping, in delete_from_page_cache_batch() argument
326 spin_lock(&mapping->host->i_lock); in delete_from_page_cache_batch()
327 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
332 filemap_unaccount_folio(mapping, folio); in delete_from_page_cache_batch()
334 page_cache_delete_batch(mapping, fbatch); in delete_from_page_cache_batch()
335 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
336 if (mapping_shrinkable(mapping)) in delete_from_page_cache_batch()
337 inode_add_lru(mapping->host); in delete_from_page_cache_batch()
338 spin_unlock(&mapping->host->i_lock); in delete_from_page_cache_batch()
341 filemap_free_folio(mapping, fbatch->folios[i]); in delete_from_page_cache_batch()
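The batch-delete comment above notes that @fbatch must be sorted by index and may contain holes. A minimal caller sketch, modeled on how the truncation code drives delete_from_page_cache_batch(); the folios are assumed already locked and unmapped, and drop_cached_range is a hypothetical name:

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>

    static void drop_cached_range(struct address_space *mapping,
                                  struct folio_batch *fbatch)
    {
            unsigned int i;

            /* Removes each folio from the xarray and unaccounts it. */
            delete_from_page_cache_batch(mapping, fbatch);

            /* The batch still holds references; unlock and drop them. */
            for (i = 0; i < folio_batch_count(fbatch); i++)
                    folio_unlock(fbatch->folios[i]);
            folio_batch_release(fbatch);
    }
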
344 int filemap_check_errors(struct address_space *mapping) in filemap_check_errors() argument
348 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
349 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
351 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
352 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
358 static int filemap_check_and_keep_errors(struct address_space *mapping) in filemap_check_and_keep_errors() argument
361 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
363 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
369 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
370 * @mapping: address space structure to write
373 * Call writepages on the mapping using the provided wbc to control the
378 int filemap_fdatawrite_wbc(struct address_space *mapping, in filemap_fdatawrite_wbc() argument
383 if (!mapping_can_writeback(mapping) || in filemap_fdatawrite_wbc()
384 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in filemap_fdatawrite_wbc()
387 wbc_attach_fdatawrite_inode(wbc, mapping->host); in filemap_fdatawrite_wbc()
388 ret = do_writepages(mapping, wbc); in filemap_fdatawrite_wbc()
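A sketch of driving filemap_fdatawrite_wbc() directly, filling in a writeback_control the same way __filemap_fdatawrite_range() below does; writeback_range is a hypothetical helper name:

    #include <linux/writeback.h>

    static int writeback_range(struct address_space *mapping,
                               loff_t start, loff_t end)
    {
            struct writeback_control wbc = {
                    .sync_mode   = WB_SYNC_ALL,    /* integrity writeback */
                    .nr_to_write = LONG_MAX,
                    .range_start = start,
                    .range_end   = end,
            };

            return filemap_fdatawrite_wbc(mapping, &wbc);
    }
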
395 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
396 * @mapping: address space structure to write
401 * Start writeback against all of a mapping's dirty pages that lie
411 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in __filemap_fdatawrite_range() argument
421 return filemap_fdatawrite_wbc(mapping, &wbc); in __filemap_fdatawrite_range()
424 static inline int __filemap_fdatawrite(struct address_space *mapping, in __filemap_fdatawrite() argument
427 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); in __filemap_fdatawrite()
430 int filemap_fdatawrite(struct address_space *mapping) in filemap_fdatawrite() argument
432 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); in filemap_fdatawrite()
436 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range() argument
439 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); in filemap_fdatawrite_range()
445 * @mapping: target address_space
452 int filemap_flush(struct address_space *mapping) in filemap_flush() argument
454 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); in filemap_flush()
460 * @mapping: address space within which to check
470 bool filemap_range_has_page(struct address_space *mapping, in filemap_range_has_page() argument
474 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
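filemap_range_has_page() answers only whether any page exists over a byte range. A hedged sketch of the kind of check a direct-I/O path might make; the helper name and fallback policy are illustrative, not the kernel's actual decision logic:

    /* Hypothetical helper: true if the range has no cached pages. */
    static bool range_is_uncached(struct address_space *mapping,
                                  loff_t pos, size_t len)
    {
            return !filemap_range_has_page(mapping, pos, pos + len - 1);
    }
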
501 static void __filemap_fdatawait_range(struct address_space *mapping, in __filemap_fdatawait_range() argument
516 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, in __filemap_fdatawait_range()
534 * @mapping: address space structure to wait for
548 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, in filemap_fdatawait_range() argument
551 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range()
552 return filemap_check_errors(mapping); in filemap_fdatawait_range()
558 * @mapping: address space structure to wait for
570 int filemap_fdatawait_range_keep_errors(struct address_space *mapping, in filemap_fdatawait_range_keep_errors() argument
573 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range_keep_errors()
574 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_range_keep_errors()
596 struct address_space *mapping = file->f_mapping; in file_fdatawait_range() local
598 __filemap_fdatawait_range(mapping, start_byte, end_byte); in file_fdatawait_range()
605 * @mapping: address space structure to wait for
617 int filemap_fdatawait_keep_errors(struct address_space *mapping) in filemap_fdatawait_keep_errors() argument
619 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait_keep_errors()
620 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_keep_errors()
625 static bool mapping_needs_writeback(struct address_space *mapping) in mapping_needs_writeback() argument
627 return mapping->nrpages; in mapping_needs_writeback()
630 bool filemap_range_has_writeback(struct address_space *mapping, in filemap_range_has_writeback() argument
633 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
657 * @mapping: the address_space for the pages
668 int filemap_write_and_wait_range(struct address_space *mapping, in filemap_write_and_wait_range() argument
673 if (mapping_needs_writeback(mapping)) { in filemap_write_and_wait_range()
674 err = __filemap_fdatawrite_range(mapping, lstart, lend, in filemap_write_and_wait_range()
683 __filemap_fdatawait_range(mapping, lstart, lend); in filemap_write_and_wait_range()
685 err2 = filemap_check_errors(mapping); in filemap_write_and_wait_range()
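filemap_write_and_wait_range() is the usual building block for fsync. A minimal sketch of a hypothetical filesystem's fsync hook, assuming the metadata flush happens elsewhere:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static int myfs_fsync(struct file *file, loff_t start, loff_t end,
                          int datasync)
    {
            struct inode *inode = file_inode(file);
            int err;

            /* Write back dirty pages in the range and wait for them. */
            err = filemap_write_and_wait_range(inode->i_mapping, start, end);
            if (err)
                    return err;

            inode_lock(inode);
            /* ... flush filesystem metadata here ... */
            inode_unlock(inode);
            return 0;
    }
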
692 void __filemap_set_wb_err(struct address_space *mapping, int err) in __filemap_set_wb_err() argument
694 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
696 trace_filemap_set_wb_err(mapping, eseq); in __filemap_set_wb_err()
709 * Grab the wb_err from the mapping. If it matches what we have in the file,
712 * If it doesn't match, then take the mapping value, set the "seen" flag in
718 * While we handle mapping->wb_err with atomic operations, the f_wb_err
728 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err() local
731 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
735 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
746 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
747 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
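Because each struct file samples mapping->wb_err independently, two descriptors on the same inode can both observe an error that happened since their last check. A hedged sketch; report_wb_error is a hypothetical name:

    static int report_wb_error(struct file *file)
    {
            /* Advances file->f_wb_err past any error seen so far. */
            int err = file_check_and_advance_wb_err(file);

            if (err)
                    pr_warn("writeback error %d on %pD\n", err, file);
            return err;
    }
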
771 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range() local
773 if (mapping_needs_writeback(mapping)) { in file_write_and_wait_range()
774 err = __filemap_fdatawrite_range(mapping, lstart, lend, in file_write_and_wait_range()
778 __filemap_fdatawait_range(mapping, lstart, lend); in file_write_and_wait_range()
804 struct address_space *mapping = old->mapping; in replace_page_cache_page() local
805 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_page()
807 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
811 VM_BUG_ON_PAGE(new->mapping, new); in replace_page_cache_page()
814 new->mapping = mapping; in replace_page_cache_page()
822 old->mapping = NULL; in replace_page_cache_page()
839 noinline int __filemap_add_folio(struct address_space *mapping, in __filemap_add_folio() argument
842 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
849 mapping_set_update(&xas, mapping); in __filemap_add_folio()
863 folio->mapping = mapping; in __filemap_add_folio()
889 BUG_ON(shmem_mapping(mapping)); in __filemap_add_folio()
899 mapping->nrpages += nr; in __filemap_add_folio()
920 folio->mapping = NULL; in __filemap_add_folio()
927 int filemap_add_folio(struct address_space *mapping, struct folio *folio, in filemap_add_folio() argument
934 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); in filemap_add_folio()
979 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
981 * @mapping1: the first mapping to lock
982 * @mapping2: the second mapping to lock
999 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1001 * @mapping1: the first mapping to unlock
1002 * @mapping2: the second mapping to unlock
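The helpers these comments document, filemap_invalidate_lock_two() and filemap_invalidate_unlock_two(), take both invalidate_locks in a stable order so that two-file operations (clone/dedupe-style ioctls, for instance) cannot deadlock against each other. A sketch of a hypothetical caller:

    static void operate_on_two_files(struct file *src, struct file *dst)
    {
            filemap_invalidate_lock_two(src->f_mapping, dst->f_mapping);

            /* ... safely manipulate both page caches here ... */

            filemap_invalidate_unlock_two(src->f_mapping, dst->f_mapping);
    }
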
1641 struct address_space *mapping; in page_endio() local
1644 mapping = folio_mapping(folio); in page_endio()
1645 if (mapping) in page_endio()
1646 mapping_set_error(mapping, err); in page_endio()
1743 * @mapping: Mapping.
1760 pgoff_t page_cache_next_miss(struct address_space *mapping, in page_cache_next_miss() argument
1763 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1779 * @mapping: Mapping.
1796 pgoff_t page_cache_prev_miss(struct address_space *mapping, in page_cache_prev_miss() argument
1799 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
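page_cache_next_miss() and page_cache_prev_miss() report the nearest gap in the cache; readahead code uses them to size speculative reads. A hedged sketch; cached_run_length is hypothetical and ignores the wrap-around case documented for these helpers:

    static unsigned long cached_run_length(struct address_space *mapping,
                                           pgoff_t index,
                                           unsigned long max_scan)
    {
            /* Number of consecutively cached pages starting at @index. */
            return page_cache_next_miss(mapping, index, max_scan) - index;
    }
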
1835 * @mapping: the address_space to search
1838 * Looks up the page cache entry at @mapping & @index. If it is a folio,
1845 static void *mapping_get_entry(struct address_space *mapping, pgoff_t index) in mapping_get_entry() argument
1847 XA_STATE(xas, &mapping->i_pages, index); in mapping_get_entry()
1878 * @mapping: The address_space to search.
1883 * Looks up the page cache entry at @mapping & @index.
1909 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, in __filemap_get_folio() argument
1915 folio = mapping_get_entry(mapping, index); in __filemap_get_folio()
1935 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1956 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) in __filemap_get_folio()
1976 err = filemap_add_folio(mapping, folio, index, gfp); in __filemap_get_folio()
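A sketch of the common find-or-create pattern with __filemap_get_folio(). In the kernel version shown here it returns NULL on failure; later kernels return an ERR_PTR instead. get_locked_folio is a hypothetical name:

    #include <linux/pagemap.h>

    static struct folio *get_locked_folio(struct address_space *mapping,
                                          pgoff_t index)
    {
            /* Returns a locked, referenced folio, allocating on a miss. */
            return __filemap_get_folio(mapping, index,
                                       FGP_LOCK | FGP_CREAT,
                                       mapping_gfp_mask(mapping));
    }
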
2033 * @mapping: The address_space to search
2040 * the mapping. The entries are placed in @fbatch. find_get_entries()
2051 unsigned find_get_entries(struct address_space *mapping, pgoff_t start, in find_get_entries() argument
2054 XA_STATE(xas, &mapping->i_pages, start); in find_get_entries()
2070 * @mapping: The address_space to search.
2076 * find_lock_entries() will return a batch of entries from @mapping.
2088 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, in find_lock_entries() argument
2091 XA_STATE(xas, &mapping->i_pages, start); in find_lock_entries()
2103 if (folio->mapping != mapping || in find_lock_entries()
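Both entry lookups can return value entries (shadow, swap, or DAX entries) alongside real folios, so consumers must check xa_is_value() before touching a folio. A hedged sketch in the style of the truncate/invalidate loops, against this kernel version's find_lock_entries() signature; scan_range is hypothetical:

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    static void scan_range(struct address_space *mapping,
                           pgoff_t start, pgoff_t end)
    {
            struct folio_batch fbatch;
            pgoff_t indices[PAGEVEC_SIZE];
            unsigned int i;

            folio_batch_init(&fbatch);
            while (find_lock_entries(mapping, start, end, &fbatch, indices)) {
                    /* Resume after the last entry returned. */
                    start = indices[folio_batch_count(&fbatch) - 1] + 1;

                    for (i = 0; i < folio_batch_count(&fbatch); i++) {
                            struct folio *folio = fbatch.folios[i];

                            if (xa_is_value(folio))
                                    continue;   /* shadow/swap/DAX entry */
                            /* folio is locked and referenced; inspect it. */
                            folio_unlock(folio);
                    }
                    folio_batch_remove_exceptionals(&fbatch);
                    folio_batch_release(&fbatch);
                    cond_resched();
            }
    }
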
2125 * @mapping: The address_space to search
2130 * Search for and return a batch of folios in the mapping starting at
2144 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, in filemap_get_folios() argument
2147 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios()
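filemap_get_folios() updates *start as it goes, so batched loops naturally resume where the previous batch ended. A hedged sketch of a hypothetical consumer that just counts cached folios:

    static unsigned long count_cached_folios(struct address_space *mapping,
                                             pgoff_t start, pgoff_t end)
    {
            struct folio_batch fbatch;
            unsigned long count = 0;

            folio_batch_init(&fbatch);
            while (filemap_get_folios(mapping, &start, end, &fbatch)) {
                    count += folio_batch_count(&fbatch);
                    folio_batch_release(&fbatch);   /* drop batch refs */
                    cond_resched();
            }
            return count;
    }
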
2194 * @mapping: The address_space to search
2207 unsigned filemap_get_folios_contig(struct address_space *mapping, in filemap_get_folios_contig() argument
2210 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_contig()
2267 * @mapping: the address_space to search
2280 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, in find_get_pages_range_tag() argument
2284 XA_STATE(xas, &mapping->i_pages, *index); in find_get_pages_range_tag()
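Tagged lookup is how writeback visits only the pages marked dirty (or under writeback, as __filemap_fdatawait_range() above does with the writeback tag). A hedged sketch using the pagevec wrapper present in this kernel version; walk_dirty_pages is hypothetical:

    #include <linux/pagevec.h>

    static unsigned long walk_dirty_pages(struct address_space *mapping,
                                          pgoff_t index, pgoff_t end)
    {
            struct pagevec pvec;
            unsigned long nr = 0;

            pagevec_init(&pvec);
            while (pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                            PAGECACHE_TAG_DIRTY)) {
                    /* Pages come back referenced but not locked. */
                    nr += pagevec_count(&pvec);
                    pagevec_release(&pvec);
                    cond_resched();
            }
            return nr;
    }
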
2354 static void filemap_get_read_batch(struct address_space *mapping, in filemap_get_read_batch() argument
2357 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2423 static bool filemap_range_uptodate(struct address_space *mapping, in filemap_range_uptodate() argument
2433 if (!mapping->a_ops->is_partially_uptodate) in filemap_range_uptodate()
2435 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2446 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2450 struct address_space *mapping, struct iov_iter *iter, in filemap_update_page() argument
2456 if (!filemap_invalidate_trylock_shared(mapping)) in filemap_update_page()
2459 filemap_invalidate_lock_shared(mapping); in filemap_update_page()
2467 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2481 if (!folio->mapping) in filemap_update_page()
2485 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, folio)) in filemap_update_page()
2492 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, in filemap_update_page()
2498 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2505 struct address_space *mapping, pgoff_t index, in filemap_create_folio() argument
2511 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); in filemap_create_folio()
2525 * while mapping blocks for IO so let's hold the lock here as in filemap_create_folio()
2528 filemap_invalidate_lock_shared(mapping); in filemap_create_folio()
2529 error = filemap_add_folio(mapping, folio, index, in filemap_create_folio()
2530 mapping_gfp_constraint(mapping, GFP_KERNEL)); in filemap_create_folio()
2536 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_create_folio()
2540 filemap_invalidate_unlock_shared(mapping); in filemap_create_folio()
2544 filemap_invalidate_unlock_shared(mapping); in filemap_create_folio()
2550 struct address_space *mapping, struct folio *folio, in filemap_readahead() argument
2553 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2565 struct address_space *mapping = filp->f_mapping; in filemap_get_pages() local
2577 filemap_get_read_batch(mapping, index, last_index, fbatch); in filemap_get_pages()
2581 page_cache_sync_readahead(mapping, ra, filp, index, in filemap_get_pages()
2583 filemap_get_read_batch(mapping, index, last_index, fbatch); in filemap_get_pages()
2588 err = filemap_create_folio(filp, mapping, in filemap_get_pages()
2597 err = filemap_readahead(iocb, filp, mapping, folio, last_index); in filemap_get_pages()
2605 err = filemap_update_page(iocb, mapping, iter, folio); in filemap_get_pages()
2646 struct address_space *mapping = filp->f_mapping; in filemap_read() local
2647 struct inode *inode = mapping->host; in filemap_read()
2696 writably_mapped = mapping_writably_mapped(mapping); in filemap_read()
2781 struct address_space *mapping = file->f_mapping; in generic_file_read_iter() local
2782 struct inode *inode = mapping->host; in generic_file_read_iter()
2785 if (filemap_range_needs_writeback(mapping, iocb->ki_pos, in generic_file_read_iter()
2789 retval = filemap_write_and_wait_range(mapping, in generic_file_read_iter()
2798 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
2826 struct address_space *mapping, struct folio *folio, in folio_seek_hole_data() argument
2829 const struct address_space_operations *ops = mapping->a_ops; in folio_seek_hole_data()
2830 size_t offset, bsz = i_blocksize(mapping->host); in folio_seek_hole_data()
2840 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
2867 * @mapping: Address space to search.
2883 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, in mapping_seek_hole_data() argument
2886 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
2907 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
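A sketch of wiring mapping_seek_hole_data() into an llseek implementation, loosely following the shmem pattern; myfs_llseek is a hypothetical filesystem hook:

    static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
    {
            struct inode *inode = file_inode(file);

            if (whence != SEEK_HOLE && whence != SEEK_DATA)
                    return generic_file_llseek(file, offset, whence);
            if (offset < 0)
                    return -ENXIO;

            inode_lock(inode);
            offset = mapping_seek_hole_data(file->f_mapping, offset,
                                            i_size_read(inode), whence);
            if (offset >= 0)
                    offset = vfs_setpos(file, offset,
                                        inode->i_sb->s_maxbytes);
            inode_unlock(inode);
            return offset;
    }
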
2987 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead() local
2988 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3104 struct address_space *mapping = file->f_mapping; in filemap_fault() local
3105 struct inode *inode = mapping->host; in filemap_fault()
3118 folio = filemap_get_folio(mapping, index); in filemap_fault()
3127 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3142 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3145 folio = __filemap_get_folio(mapping, index, in filemap_fault()
3151 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3160 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3196 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3220 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3227 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3240 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3281 struct address_space *mapping, in next_uptodate_page() argument
3304 if (folio->mapping != mapping) in next_uptodate_page()
3308 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in next_uptodate_page()
3321 static inline struct folio *first_map_page(struct address_space *mapping, in first_map_page() argument
3326 mapping, xas, end_pgoff); in first_map_page()
3329 static inline struct folio *next_map_page(struct address_space *mapping, in next_map_page() argument
3334 mapping, xas, end_pgoff); in next_map_page()
3342 struct address_space *mapping = file->f_mapping; in filemap_map_pages() local
3345 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3352 folio = first_map_page(mapping, &xas, end_pgoff); in filemap_map_pages()
3405 } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL); in filemap_map_pages()
3416 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite() local
3420 sb_start_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3423 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3436 sb_end_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3450 struct address_space *mapping = file->f_mapping; in generic_file_mmap() local
3452 if (!mapping->a_ops->read_folio) in generic_file_mmap()
3487 static struct folio *do_read_cache_folio(struct address_space *mapping, in do_read_cache_folio() argument
3494 filler = mapping->a_ops->read_folio; in do_read_cache_folio()
3496 folio = filemap_get_folio(mapping, index); in do_read_cache_folio()
3501 err = filemap_add_folio(mapping, folio, index, gfp); in do_read_cache_folio()
3520 /* Folio was truncated from mapping */ in do_read_cache_folio()
3521 if (!folio->mapping) { in do_read_cache_folio()
3549 * @mapping: The address_space to read from.
3560 * Context: May sleep. Expects mapping->invalidate_lock to be held.
3563 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, in read_cache_folio() argument
3566 return do_read_cache_folio(mapping, index, filler, file, in read_cache_folio()
3567 mapping_gfp_mask(mapping)); in read_cache_folio()
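Passing a NULL filler makes do_read_cache_folio() fall back to the mapping's ->read_folio, as the listing above shows. A hedged sketch of a caller; read_bytes_at is hypothetical and assumes len fits within the first page of the folio:

    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    #include <linux/string.h>

    static int read_bytes_at(struct address_space *mapping, pgoff_t index,
                             void *buf, size_t len)
    {
            struct folio *folio;
            void *kaddr;

            folio = read_cache_folio(mapping, index, NULL, NULL);
            if (IS_ERR(folio))
                    return PTR_ERR(folio);

            /* Folio is uptodate and referenced, but not locked. */
            kaddr = kmap_local_folio(folio, 0);
            memcpy(buf, kaddr, len);
            kunmap_local(kaddr);

            folio_put(folio);
            return 0;
    }
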
3571 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page() argument
3576 folio = do_read_cache_folio(mapping, index, filler, file, gfp); in do_read_cache_page()
3582 struct page *read_cache_page(struct address_space *mapping, in read_cache_page() argument
3585 return do_read_cache_page(mapping, index, filler, file, in read_cache_page()
3586 mapping_gfp_mask(mapping)); in read_cache_page()
3592 * @mapping: the page's address_space
3596 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3601 * The function expects mapping->invalidate_lock to be already held.
3605 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp() argument
3609 return do_read_cache_page(mapping, index, NULL, NULL, gfp); in read_cache_page_gfp()
3637 struct address_space *mapping = file->f_mapping; in generic_file_direct_write() local
3638 struct inode *inode = mapping->host; in generic_file_direct_write()
3653 written = filemap_write_and_wait_range(mapping, pos, in generic_file_direct_write()
3665 written = invalidate_inode_pages2_range(mapping, in generic_file_direct_write()
3677 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
3694 * Skip invalidation for async writes or if mapping has no pages. in generic_file_direct_write()
3696 if (written > 0 && mapping->nrpages && in generic_file_direct_write()
3697 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end)) in generic_file_direct_write()
3720 struct address_space *mapping = file->f_mapping; in generic_perform_write() local
3721 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
3753 status = a_ops->write_begin(file, mapping, pos, bytes, in generic_perform_write()
3758 if (mapping_writably_mapped(mapping)) in generic_perform_write()
3764 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
3787 balance_dirty_pages_ratelimited(mapping); in generic_perform_write()
3818 struct address_space *mapping = file->f_mapping; in __generic_file_write_iter() local
3819 struct inode *inode = mapping->host; in __generic_file_write_iter()
3867 err = filemap_write_and_wait_range(mapping, pos, endbyte); in __generic_file_write_iter()
3871 invalidate_mapping_pages(mapping, in __generic_file_write_iter()
3941 struct address_space * const mapping = folio->mapping; in filemap_release_folio() local
3947 if (mapping && mapping->a_ops->release_folio) in filemap_release_folio()
3948 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()