Lines Matching refs:mapping (identifier cross-reference for `mapping` in mm/page-writeback.c)

1880 int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,  in balance_dirty_pages_ratelimited_flags()  argument
1883 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited_flags()
1950 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited() argument
1952 balance_dirty_pages_ratelimited_flags(mapping, 0); in balance_dirty_pages_ratelimited()
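The four matches above are the dirty-throttling entry points: balance_dirty_pages_ratelimited() is the rate-limited wrapper that buffered-write paths call once per copied chunk, and it simply forwards to balance_dirty_pages_ratelimited_flags() with no flags. A minimal caller sketch, loosely modeled on generic_perform_write(); my_buffered_write() and my_copy_one_folio() are hypothetical names:

```c
#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/writeback.h>

static ssize_t my_buffered_write(struct file *file, struct iov_iter *from)
{
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;

	while (iov_iter_count(from)) {
		/* hypothetical helper: copies one folio's worth of data
		 * into the page cache and marks it dirty */
		ssize_t copied = my_copy_one_folio(mapping, from);

		if (copied < 0)
			return written ? written : copied;
		written += copied;

		/* may sleep until enough writeback completes to bring
		 * this task back under its dirty-page budget */
		balance_dirty_pages_ratelimited(mapping);
	}
	return written;
}
```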
2229 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument
2232 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
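The XA_STATE match shows that tag_pages_for_writeback() walks mapping->i_pages directly. A condensed sketch of the loop, following the upstream implementation of this vintage: every PAGECACHE_TAG_DIRTY entry in the range also gets PAGECACHE_TAG_TOWRITE, so a data-integrity writeback pass can consume the TOWRITE tag without livelocking against concurrent re-dirtying, which only sets DIRTY:

```c
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned int tagged = 0;
	void *page;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
		/* drop the lock and reschedule every XA_CHECK_SCHED entries */
		if (++tagged % XA_CHECK_SCHED)
			continue;
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}
```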
2282 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument
2299 index = mapping->writeback_index; /* prev offset */ in write_cache_pages()
2308 tag_pages_for_writeback(mapping, index, end); in write_cache_pages()
2317 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
2337 if (unlikely(page->mapping != mapping)) { in write_cache_pages()
2359 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); in write_cache_pages()
2411 mapping->writeback_index = done_index; in write_cache_pages()
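write_cache_pages() is the generic dirty-page walk: it resumes from mapping->writeback_index for cyclic writeback, pre-tags the range via tag_pages_for_writeback() for data-integrity passes, looks pages up by tag, rechecks page->mapping under the page lock (the unlikely(page->mapping != mapping) match) to catch truncation, and stores the stop position back in mapping->writeback_index. A hedged sketch of how a filesystem drives it; the myfs_* names and the submit helper are hypothetical, but the writepage_t callback signature is the real one:

```c
#include <linux/pagemap.h>
#include <linux/writeback.h>

static int myfs_writepage_cb(struct page *page, struct writeback_control *wbc,
			     void *data)
{
	struct address_space *mapping = data;
	int err;

	/* hypothetical helper: marks @page writeback, submits the bio,
	 * and unlocks the page */
	err = myfs_submit_page(page, wbc);
	if (err)
		mapping_set_error(mapping, err);
	return err;
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, myfs_writepage_cb, mapping);
}
```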
2424 struct address_space *mapping = data; in __writepage() local
2425 int ret = mapping->a_ops->writepage(page, wbc); in __writepage()
2426 mapping_set_error(mapping, ret); in __writepage()
2440 int generic_writepages(struct address_space *mapping, in generic_writepages() argument
2447 if (!mapping->a_ops->writepage) in generic_writepages()
2451 ret = write_cache_pages(mapping, wbc, __writepage, mapping); in generic_writepages()
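__writepage() and generic_writepages() are the glue between those pieces: generic_writepages() bails out for mappings with no ->writepage (chardevs and other special files) and otherwise runs write_cache_pages() with __writepage() as the callback, which invokes a_ops->writepage and folds any error into the mapping via mapping_set_error(). Reconstructed approximately around the matches above:

```c
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);

	mapping_set_error(mapping, ret);
	return ret;
}

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}
```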
2458 int do_writepages(struct address_space *mapping, struct writeback_control *wbc) in do_writepages() argument
2465 wb = inode_to_wb_wbc(mapping->host, wbc); in do_writepages()
2468 if (mapping->a_ops->writepages) in do_writepages()
2469 ret = mapping->a_ops->writepages(mapping, wbc); in do_writepages()
2471 ret = generic_writepages(mapping, wbc); in do_writepages()
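do_writepages() is the top-level dispatcher: it prefers the filesystem's batched a_ops->writepages and falls back to generic_writepages(). A condensed sketch of just that dispatch; the upstream function additionally looks up the inode's bdi_writeback via inode_to_wb_wbc() for bandwidth estimation and retries WB_SYNC_ALL writeback on -ENOMEM. do_writepages_sketch is an illustrative name:

```c
static int do_writepages_sketch(struct address_space *mapping,
				struct writeback_control *wbc)
{
	if (wbc->nr_to_write <= 0)
		return 0;

	/* a filesystem-provided batched method wins over the fallback */
	if (mapping->a_ops->writepages)
		return mapping->a_ops->writepages(mapping, wbc);
	return generic_writepages(mapping, wbc);
}
```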
2508 struct address_space *mapping = folio->mapping; in folio_write_one() local
2521 ret = mapping->a_ops->writepage(&folio->page, &wbc); in folio_write_one()
2530 ret = filemap_check_errors(mapping); in folio_write_one()
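folio_write_one() writes a single folio synchronously: it builds a WB_SYNC_ALL writeback_control, calls a_ops->writepage if the folio is still dirty, waits for writeback to finish, and reports any mapping-wide error via filemap_check_errors(). A minimal usage sketch; flush_one_folio() is a hypothetical caller:

```c
#include <linux/pagemap.h>

static int flush_one_folio(struct folio *folio)
{
	/* the folio must be locked on entry; folio_write_one()
	 * unlocks it before returning */
	folio_lock(folio);
	return folio_write_one(folio);
}
```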
2538 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio) in noop_dirty_folio() argument
2554 struct address_space *mapping) in folio_account_dirtied() argument
2556 struct inode *inode = mapping->host; in folio_account_dirtied()
2558 trace_writeback_dirty_folio(folio, mapping); in folio_account_dirtied()
2560 if (mapping_can_writeback(mapping)) { in folio_account_dirtied()
2608 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping, in __folio_mark_dirty() argument
2613 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_mark_dirty()
2614 if (folio->mapping) { /* Race with truncate? */ in __folio_mark_dirty()
2616 folio_account_dirtied(folio, mapping); in __folio_mark_dirty()
2617 __xa_set_mark(&mapping->i_pages, folio_index(folio), in __folio_mark_dirty()
2620 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_mark_dirty()
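__folio_mark_dirty() takes the i_pages lock, rechecks folio->mapping to close the race with truncation, and only then accounts the folio and sets PAGECACHE_TAG_DIRTY. Reconstructed approximately around the matches above; the warn parameter guards a WARN_ON_ONCE for dirtying a non-uptodate folio:

```c
void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
			int warn)
{
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (folio->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
		folio_account_dirtied(folio, mapping);
		__xa_set_mark(&mapping->i_pages, folio_index(folio),
				PAGECACHE_TAG_DIRTY);
	}
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
```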
2642 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio) in filemap_dirty_folio() argument
2650 __folio_mark_dirty(folio, mapping, !folio_test_private(folio)); in filemap_dirty_folio()
2653 if (mapping->host) { in filemap_dirty_folio()
2655 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in filemap_dirty_folio()
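filemap_dirty_folio() is the stock .dirty_folio implementation for filesystems that keep no private per-folio state: it marks the folio dirty via __folio_mark_dirty() and then propagates I_DIRTY_PAGES to the owning inode. Filesystems typically just wire it into their address_space_operations; myfs_aops is illustrative:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>

static const struct address_space_operations myfs_aops = {
	.dirty_folio	= filemap_dirty_folio,
	/* .read_folio, .writepages, ... elided */
};
```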
2675 struct address_space *mapping = folio->mapping; in folio_account_redirty() local
2677 if (mapping && mapping_can_writeback(mapping)) { in folio_account_redirty()
2678 struct inode *inode = mapping->host; in folio_account_redirty()
2711 ret = filemap_dirty_folio(folio->mapping, folio); in folio_redirty_for_writepage()
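folio_redirty_for_writepage() serves a ->writepage that decides it cannot write the folio after all: it re-dirties the folio through filemap_dirty_folio() and, via folio_account_redirty(), backs the skipped pages out of the wbc and bandwidth accounting so they are not counted as written. A typical usage sketch; the myfs_* names are hypothetical:

```c
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);

	if (myfs_would_block(wbc)) {	/* hypothetical "can't write now" test */
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		return 0;
	}
	return myfs_do_submit(folio, wbc);	/* hypothetical submission */
}
```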
2733 struct address_space *mapping = folio_mapping(folio); in folio_mark_dirty() local
2735 if (likely(mapping)) { in folio_mark_dirty()
2749 return mapping->a_ops->dirty_folio(mapping, folio); in folio_mark_dirty()
2752 return noop_dirty_folio(mapping, folio); in folio_mark_dirty()
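folio_mark_dirty() picks the dirtying method by mapping: folios in a live mapping go through the mapping's a_ops->dirty_folio, while folios without one fall back to noop_dirty_folio(), which just sets the dirty flag. Condensed from the matches above:

```c
bool folio_mark_dirty(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (likely(mapping)) {
		/* clear stale reclaim intent before re-dirtying */
		if (folio_test_reclaim(folio))
			folio_clear_reclaim(folio);
		return mapping->a_ops->dirty_folio(mapping, folio);
	}
	return noop_dirty_folio(mapping, folio);
}
```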
2792 struct address_space *mapping = folio_mapping(folio); in __folio_cancel_dirty() local
2794 if (mapping_can_writeback(mapping)) { in __folio_cancel_dirty()
2795 struct inode *inode = mapping->host; in __folio_cancel_dirty()
2829 struct address_space *mapping = folio_mapping(folio); in folio_clear_dirty_for_io() local
2834 if (mapping && mapping_can_writeback(mapping)) { in folio_clear_dirty_for_io()
2835 struct inode *inode = mapping->host; in folio_clear_dirty_for_io()
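folio_clear_dirty_for_io() hands the dirty state from the page cache to the I/O path: it returns true only when the folio was dirty and the caller now owns writing it back, adjusting the dirty counters against the inode's wb under the mapping_can_writeback() check above. A sketch of the usual pattern in a writeback submitter, with myfs_issue_bio() hypothetical:

```c
static void myfs_submit_folio(struct folio *folio,
			      struct writeback_control *wbc)
{
	if (folio_clear_dirty_for_io(folio)) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		myfs_issue_bio(folio, wbc);	/* hypothetical I/O submission */
	} else {
		/* someone else cleaned it; nothing to write */
		folio_unlock(folio);
	}
}
```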
2914 struct address_space *mapping = folio_mapping(folio); in __folio_end_writeback() local
2918 if (mapping && mapping_use_writeback_tags(mapping)) { in __folio_end_writeback()
2919 struct inode *inode = mapping->host; in __folio_end_writeback()
2923 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_end_writeback()
2926 __xa_clear_mark(&mapping->i_pages, folio_index(folio), in __folio_end_writeback()
2933 if (!mapping_tagged(mapping, in __folio_end_writeback()
2939 if (mapping->host && !mapping_tagged(mapping, in __folio_end_writeback()
2941 sb_clear_inode_writeback(mapping->host); in __folio_end_writeback()
2943 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_end_writeback()
2959 struct address_space *mapping = folio_mapping(folio); in __folio_start_writeback() local
2964 if (mapping && mapping_use_writeback_tags(mapping)) { in __folio_start_writeback()
2965 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in __folio_start_writeback()
2966 struct inode *inode = mapping->host; in __folio_start_writeback()
2976 on_wblist = mapping_tagged(mapping, in __folio_start_writeback()
2993 if (mapping->host && !on_wblist) in __folio_start_writeback()
2994 sb_mark_inode_writeback(mapping->host); in __folio_start_writeback()
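__folio_start_writeback() and __folio_end_writeback() bracket a folio's time under I/O for mappings that use writeback tags: start sets PAGECACHE_TAG_WRITEBACK and puts mapping->host on the superblock's inode-writeback list when this is the mapping's first folio under writeback; end clears the tag and takes the inode back off the list via sb_clear_inode_writeback() once no tagged folios remain. Drivers reach them through folio_start_writeback()/folio_end_writeback(). A hedged completion-side sketch; myfs_end_io is hypothetical, and bio_for_each_folio_all assumes a folio-aware block layer:

```c
#include <linux/bio.h>

static void myfs_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_writeback(fi.folio);	/* -> __folio_end_writeback() */
	bio_put(bio);
}
```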