/Linux-v5.4/include/linux/

D  page-flags.h
     172  static inline struct page *compound_head(struct page *page)  in compound_head() function
     174  unsigned long head = READ_ONCE(page->compound_head);  in compound_head()
     183  return READ_ONCE(page->compound_head) & 1;  in PageTail()
     232  #define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
     238  PF_POISONED_CHECK(compound_head(page)); })
     383  page = compound_head(page);  in PAGEFLAG()
     463  page = compound_head(page);  in PageAnon()
     482  page = compound_head(page);  in PageKsm()
     495  page = compound_head(page);  in PageUptodate()
     554  WRITE_ONCE(page->compound_head, (unsigned long)head + 1);  in __PAGEFLAG()
     [all …]
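Note: the page-flags.h hits at lines 172, 174, 183 and 554 together describe the
encoding the rest of this listing depends on: a tail page stores a pointer to its
head page in page->compound_head with bit zero set. A minimal sketch of the decode
and encode helpers, reconstructed from those snippets (the name set_compound_head()
is taken from the v5.4 tree, not from the hits above; struct page is the one
declared in mm_types.h):

    /* Decode: bit 0 set means "tail page"; clearing it yields the head page. */
    static inline struct page *compound_head(struct page *page)
    {
            unsigned long head = READ_ONCE(page->compound_head);

            if (unlikely(head & 1))
                    return (struct page *) (head - 1);
            return page;    /* already a head (or order-0) page */
    }

    /* A page is a tail page iff bit 0 of ->compound_head is set. */
    static __always_inline int PageTail(struct page *page)
    {
            return READ_ONCE(page->compound_head) & 1;
    }

    /* Encode: point a tail page at its head and set bit 0 (cf. line 554). */
    static __always_inline void set_compound_head(struct page *page, struct page *head)
    {
            WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
    }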
D  pagemap.h
     414  pgoff = compound_head(page)->index;  in page_to_index()
     415  pgoff += page - compound_head(page);  in page_to_index()
     469  page = compound_head(page);  in trylock_page()
     527  wait_on_page_bit(compound_head(page), PG_locked);  in wait_on_page_locked()
     534  return wait_on_page_bit_killable(compound_head(page), PG_locked);  in wait_on_page_locked_killable()
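Note: the two page_to_index() hits (pagemap.h lines 414-415) show how the page-cache
index of a tail page is derived, since tail pages do not carry a valid ->index of
their own. A hedged sketch of the whole helper, assuming the PageTransTail() fast
path from the surrounding v5.4 header (not part of the hits above):

    static inline pgoff_t page_to_index(struct page *page)
    {
            pgoff_t pgoff;

            if (likely(!PageTransTail(page)))
                    return page->index;

            /* Tail page: start from the head's index ... */
            pgoff = compound_head(page)->index;
            /* ... and add the tail's offset within the compound page. */
            pgoff += page - compound_head(page);
            return pgoff;
    }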
D  swapops.h
     166  BUG_ON(!PageLocked(compound_head(page)));  in make_migration_entry()
     195  BUG_ON(!PageLocked(compound_head(p)));  in migration_entry_to_page()
D  page_ref.h
      72  return atomic_read(&compound_head(page)->_refcount);  in page_count()
D  migrate.h
      42  return alloc_huge_page_nodemask(page_hstate(compound_head(page)),  in new_page_nodemask()
D  mm_types.h
     131  unsigned long compound_head;  /* Bit zero is set */  member
D  mm.h
     701  page = compound_head(page);  in compound_mapcount()
     748  return compound_head(page);  in virt_to_head_page()
    1018  page = compound_head(page);  in get_page()
    1029  page = compound_head(page);  in try_get_page()
    1038  page = compound_head(page);  in put_page()
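Note: the page_ref.h and mm.h hits show the common refcounting pattern: only the head
page's _refcount is meaningful for a compound page, so every reference operation first
resolves to the head. A sketch of the pattern around mm.h line 1018 (page_ref_count(),
page_ref_inc() and VM_BUG_ON_PAGE() are the usual v5.4 helpers, assumed here rather
than shown in the hits; details may differ slightly from the tree):

    /* page_ref.h:72 - reading the refcount of any page reads the head's counter. */
    static inline int page_count(struct page *page)
    {
            return atomic_read(&compound_head(page)->_refcount);
    }

    /* mm.h:1018 - taking a reference likewise pins the head page. */
    static inline void get_page(struct page *page)
    {
            page = compound_head(page);
            /* A head (or base) page being pinned must already have a positive refcount. */
            VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
            page_ref_inc(page);
    }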
/Linux-v5.4/mm/

D  memory-failure.c
     329  tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;  in add_to_kill()
     786  struct page *hpage = compound_head(p);  in me_huge_page()
     932  struct page *head = compound_head(page);  in get_hwpoison_page()
     949  if (head == compound_head(page))  in get_hwpoison_page()
    1087  struct page *head = compound_head(p);  in memory_failure_hugetlb()
    1281  orig_head = hpage = compound_head(p);  in memory_failure()
    1322  hpage = compound_head(p);  in memory_failure()
    1349  if (PageCompound(p) && compound_head(p) != orig_head) {  in memory_failure()
    1545  page = compound_head(p);  in unpoison_memory()
    1687  struct page *hpage = compound_head(page);  in soft_offline_huge_page()
     [all …]
D  util.c
     610  page = compound_head(page);  in page_rmapping()
     624  page = compound_head(page);  in page_mapped()
     641  page = compound_head(page);  in page_anon_vma()
     652  page = compound_head(page);  in page_mapping()
     695  page = compound_head(page);  in __page_mapcount()
D  hwpoison-inject.c
      28  hpage = compound_head(p);  in hwpoison_inject()
D  swap.c
     309  page = compound_head(page);  in activate_page()
     329  page = compound_head(page);  in activate_page()
     375  page = compound_head(page);  in mark_page_accessed()
     801  page = compound_head(page);  in release_pages()
D  usercopy.c
     236  page = compound_head(kmap_to_page((void *)ptr));  in check_heap_object()
D  rmap.c
    1005  page = compound_head(page);  in page_move_anon_rmap()
    1201  SetPageDoubleMap(compound_head(page));  in page_add_file_rmap()
    1203  clear_page_mlock(compound_head(page));  in page_add_file_rmap()
    1325  deferred_split_huge_page(compound_head(page));  in page_remove_rmap()
D  gup.c
      71  struct page *page = compound_head(pages[index]);  in put_user_pages_dirty_lock()
    1450  struct page *head = compound_head(pages[i]);  in check_and_migrate_cma_pages()
    1802  struct page *head = compound_head(page);  in try_get_compound_head()
    1857  VM_BUG_ON_PAGE(compound_head(page) != head, page);  in gup_pte_range()
    2001  VM_BUG_ON(compound_head(page) != head);  in gup_hugepte()
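Note: the gup.c hit at line 1802 is the GUP-fast form of the same rule: resolve to the
head page first, then try to take the references speculatively, and bail out if the
page is already being freed. A sketch of try_get_compound_head() as it reads in v5.4
(page_cache_add_speculative() and WARN_ON_ONCE() are assumed from the surrounding
code, not from the hits above):

    static inline struct page *try_get_compound_head(struct page *page, int refs)
    {
            struct page *head = compound_head(page);

            /* A negative refcount means the page is on its way out. */
            if (WARN_ON_ONCE(page_ref_count(head) < 0))
                    return NULL;
            /* Speculatively add 'refs' references to the head page. */
            if (unlikely(!page_cache_add_speculative(head, refs)))
                    return NULL;
            return head;
    }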
D  filemap.c
    1249  page = compound_head(page);  in put_and_wait_on_page_locked()
    1314  page = compound_head(page);  in unlock_page()
    1381  struct page *page = compound_head(__page);  in __lock_page()
    1390  struct page *page = compound_head(__page);  in __lock_page_killable()
    1652  if (unlikely(compound_head(page)->mapping != mapping)) {  in pagecache_get_page()
    2545  if (unlikely(compound_head(page)->mapping != mapping)) {  in filemap_fault()
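Note: the filemap.c and pagemap.h lock hits show that PG_locked, like the refcount,
lives only in the head page's flags, so lock, trylock, unlock and the wait paths all
resolve to the head first. A sketch of unlock_page() along the lines of v5.4
(clear_bit_unlock_is_negative_byte() and wake_up_page_bit() are the v5.4 mechanism,
assumed here rather than shown in the hits):

    void unlock_page(struct page *page)
    {
            BUILD_BUG_ON(PG_waiters != 7);
            page = compound_head(page);
            VM_BUG_ON_PAGE(!PageLocked(page), page);
            /* Clear PG_locked and, if PG_waiters was set, wake up the sleepers. */
            if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
                    wake_up_page_bit(page, PG_locked);
    }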
D  memory_hotplug.c
    1278  head = compound_head(page);  in scan_movable_pages()
    1318  struct page *head = compound_head(page);  in do_migrate_range()
    1323  pfn = page_to_pfn(compound_head(page))  in do_migrate_range()
D  huge_memory.c
     502  struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;  in get_deferred_split_queue()
    2500  struct page *head = compound_head(page);  in __split_huge_page()
    2639  page = compound_head(page);  in page_trans_huge_mapcount()
    2695  struct page *head = compound_head(page);  in split_huge_page_to_list()
    2854  struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;  in deferred_split_huge_page()
    2919  page = compound_head(page);  in deferred_split_scan()
D  swapfile.c
    1573  page = compound_head(page);  in page_swapped()
    1603  page = compound_head(page);  in page_trans_huge_map_swapcount()
    1669  page = compound_head(page);  in reuse_swap_page()
    1722  page = compound_head(page);  in try_to_free_swap()
D  mempolicy.c
     974  struct page *head = compound_head(page);  in migrate_page_add()
    1003  return alloc_huge_page_node(page_hstate(compound_head(page)),  in alloc_new_node_page()
    1179  return alloc_huge_page_vma(page_hstate(compound_head(page)),  in new_page()
D  madvise.c
     887  order = compound_order(compound_head(page));  in madvise_inject_error()
/Linux-v5.4/fs/proc/

D  page.c
     135  struct page *head = compound_head(page);  in stable_page_flags()
     168  if (PageTail(page) && PageSlab(compound_head(page)))  in stable_page_flags()
/Linux-v5.4/arch/powerpc/mm/book3s64/

D  iommu_api.c
     133  pageshift = page_shift(compound_head(page));  in mm_iommu_do_alloc()
/Linux-v5.4/kernel/

D  crash_core.c
     431  VMCOREINFO_OFFSET(page, compound_head);  in crash_save_vmcoreinfo_init()
/Linux-v5.4/drivers/infiniband/core/

D  umem_odp.c
     746  struct page *head_page = compound_head(page);  in ib_umem_odp_unmap_dma_pages()
/Linux-v5.4/Documentation/admin-guide/kdump/

D  vmcoreinfo.rst
     133  (page, flags|_refcount|mapping|lru|_mapcount|private|compound_dtor|compound_order|compound_head)
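Note: the crash_core.c and vmcoreinfo.rst hits record that the offset of
page.compound_head is exported in VMCOREINFO precisely so that post-mortem tools can
apply the same bit-zero rule as the kernel when mapping a tail struct page to its
head. A purely hypothetical user-space helper illustrating that decoding (the
function name and types are illustrative, not taken from any real dump tool):

    /* Given the raw compound_head value read from a struct page in the dump,
     * return the address of the head struct page, or the page's own address
     * if it is not a tail page. */
    static unsigned long resolve_head_page(unsigned long page_addr,
                                           unsigned long compound_head_val)
    {
            if (compound_head_val & 1)              /* bit 0 set: tail page */
                    return compound_head_val - 1;   /* clear bit 0 -> head page */
            return page_addr;
    }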