
Searched refs:page_mapping (Results 1 – 25 of 25) sorted by relevance

/Linux-v4.19/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_init_ops.h:622  BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);  in bnx2x_ilt_line_mem_op()
bnx2x_init_ops.h:625  BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);  in bnx2x_ilt_line_mem_op()
bnx2x_init_ops.h:677  dma_addr_t page_mapping)  in bnx2x_ilt_line_wr() argument
bnx2x_init_ops.h:686  bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));  in bnx2x_ilt_line_wr()
bnx2x_init_ops.h:700  bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);  in bnx2x_ilt_line_init_op()
bnx2x_init.h:504  dma_addr_t page_mapping;  member
bnx2x_sriov.c:1662  ilt->lines[line+i].page_mapping = hw_cxt->mapping;  in bnx2x_iov_init_ilt()
bnx2x_main.c:7977  ilt->lines[cdu_ilt_start + i].page_mapping =  in bnx2x_init_hw_func()
/Linux-v4.19/mm/
util.c:507  struct address_space *page_mapping(struct page *page)  in page_mapping() function
util.c:530  EXPORT_SYMBOL(page_mapping);
util.c:539  return page_mapping(page);  in page_mapping_file()
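
mm/util.c:507 is the canonical definition, exported at line 530 and wrapped by page_mapping_file() at line 539. As a paraphrased sketch of the v4.19 helper (from memory, not a verbatim copy; consult mm/util.c for the authoritative code), it exists because page->mapping cannot be dereferenced blindly:

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* Slab pages reuse the mapping field for allocator bookkeeping. */
	if (unlikely(PageSlab(page)))
		return NULL;

	/* Swap-cache pages resolve to the per-area swap address space. */
	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	/* Anonymous pages carry a tagged anon_vma pointer, not an address_space. */
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	/* Strip the ANON/MOVABLE tag bits before handing the pointer out. */
	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}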
memory-failure.c:99  mapping = page_mapping(p);  in hwpoison_filter_dev()
memory-failure.c:666  mapping = page_mapping(p);  in me_pagecache_clean()
memory-failure.c:689  struct address_space *mapping = page_mapping(p);  in me_pagecache_dirty()
memory-failure.c:790  mapping = page_mapping(hpage);  in me_huge_page()
memory-failure.c:1007  mapping = page_mapping(hpage);  in hwpoison_user_mappings()
memory-failure.c:1563  if (page_mapping(page)) {  in unpoison_memory()
page-writeback.c:2462  struct address_space *mapping = page_mapping(page);  in __set_page_dirty_nobuffers()
page-writeback.c:2471  BUG_ON(page_mapping(page) != mapping);  in __set_page_dirty_nobuffers()
page-writeback.c:2544  struct address_space *mapping = page_mapping(page);  in set_page_dirty()
page-writeback.c:2611  struct address_space *mapping = page_mapping(page);  in __cancel_dirty_page()
page-writeback.c:2648  struct address_space *mapping = page_mapping(page);  in clear_page_dirty_for_io()
page-writeback.c:2709  struct address_space *mapping = page_mapping(page);  in test_clear_page_writeback()
page-writeback.c:2759  struct address_space *mapping = page_mapping(page);  in __test_set_page_writeback()
vmscan.c:779  if (page_mapping(page) == mapping)  in handle_write_error()
vmscan.c:882  BUG_ON(mapping != page_mapping(page));  in __remove_mapping()
vmscan.c:1088  mapping = page_mapping(page);  in page_check_dirty_writeback()
vmscan.c:1169  mapping = page_mapping(page);  in shrink_page_list()
vmscan.c:1307  mapping = page_mapping(page);  in shrink_page_list()
vmscan.c:1388  mapping = page_mapping(page);  in shrink_page_list()
vmscan.c:1589  mapping = page_mapping(page);  in __isolate_lru_page()
vmscan.c:4159  ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);  in page_evictable()
compaction.c:103  mapping = page_mapping(page);  in PageMovable()
compaction.c:837  if (!page_mapping(page) &&  in isolate_migratepages_block()
compaction.c:845  if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))  in isolate_migratepages_block()
rmap.c:980  mapping = page_mapping(page);  in page_mkclean()
rmap.c:1195  if (PageTransCompound(page) && page_mapping(page)) {  in page_add_file_rmap()
rmap.c:1858  struct address_space *mapping = page_mapping(page);  in rmap_walk_file()
migrate.c:124  mapping = page_mapping(page);  in isolate_movable_page()
migrate.c:154  mapping = page_mapping(page);  in putback_movable_page()
migrate.c:928  mapping = page_mapping(page);  in move_to_new_page()
migrate.c:2387  if (page_mapping(page))  in migrate_vma_check_page()
migrate.c:2743  mapping = page_mapping(page);  in migrate_vma_pages()
zsmalloc.c:1905  __SetPageMovable(newpage, page_mapping(oldpage));  in replace_sub_page()
zsmalloc.c:1932  mapping = page_mapping(page);  in zs_page_isolate()
zsmalloc.c:2093  mapping = page_mapping(page);  in zs_page_putback()
truncate.c:259  struct address_space *mapping = page_mapping(page);  in invalidate_inode_page()
filemap.c:296  struct address_space *mapping = page_mapping(page);  in delete_from_page_cache()
filemap.c:1256  mapping = page_mapping(page);  in page_endio()
filemap.c:1499  if (unlikely(page_mapping(page) != mapping)) {  in find_lock_entry()
shmem.c:861  if (page_mapping(page) == mapping) {  in shmem_undo_range()
shmem.c:966  if (page_mapping(page) == mapping) {  in shmem_undo_range()
shmem.c:1555  swap_mapping = page_mapping(oldpage);  in shmem_replace_page()
khugepaged.c:1396  if (page_mapping(page) != mapping) {  in collapse_shmem()
memcontrol.c:4821  struct address_space *mapping = page_mapping(page);  in mem_cgroup_move_account()
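
Across these mm/ call sites the pattern is uniform: the address_space is obtained through page_mapping() rather than by dereferencing page->mapping directly, and a NULL return (anonymous, slab, or otherwise unbacked pages) means there is no pagecache state to operate on. A hypothetical, simplified caller showing the shape shared by set_page_dirty(), shrink_page_list() and friends (the function name and exact checks below are illustrative, not taken from the tree):

#include <linux/fs.h>
#include <linux/mm.h>

static bool example_can_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/* NULL for anonymous, slab and driver-owned pages. */
	if (!mapping)
		return false;

	/* Writeback needs a backing store that implements writepage. */
	return mapping->a_ops->writepage != NULL;
}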
/Linux-v4.19/arch/nds32/mm/
cacheflush.c:242  mapping = page_mapping(page);  in flush_dcache_page()
/Linux-v4.19/include/linux/
mm.h:1191  extern struct address_space *page_mapping(struct page *page);
mm.h:1218  struct address_space *page_mapping(struct page *page);
/Linux-v4.19/Documentation/vm/
page_migration.rst:223  use page_mapping which mask off the low two bits of page->mapping under
unevictable-lru.rst:309  populate_vma_page_range() checks page_mapping() after acquiring the page lock.
unevictable-lru.rst:422  zeros out the page_mapping of the old page before unlocking it, so m[un]lock
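
The "low two bits of page->mapping" that page_migration.rst:223 alludes to are tag bits stored in the mapping pointer itself; page_mapping() strips them before returning. In v4.19 they are defined in include/linux/page-flags.h roughly as follows (quoted from memory, so verify against the header):

#define PAGE_MAPPING_ANON	0x1	/* tagged anon_vma pointer, not an address_space */
#define PAGE_MAPPING_MOVABLE	0x2	/* driver-movable page (zsmalloc, balloon, ...) */
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)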
/Linux-v4.19/fs/
buffer.c:617  struct address_space *mapping = page_mapping(page);  in __set_page_dirty_buffers()
buffer.c:1108  mapping = page_mapping(page);  in mark_buffer_dirty()
splice.c:54  mapping = page_mapping(page);  in page_cache_pipe_buf_steal()
iomap.c:675  struct address_space *mapping = page_mapping(page);  in iomap_set_page_dirty()
/Linux-v4.19/fs/f2fs/
data.c:2702  struct address_space *mapping = page_mapping(page);  in f2fs_clear_radix_tree_dirty_tag()