/Linux-v4.19/mm/

page_isolation.c
    27  zone = page_zone(page);  in set_migratetype_isolate()
   100  zone = page_zone(page);  in unset_migratetype_isolate()
   301  zone = page_zone(page);  in test_pages_isolated()

mlock.c
    64  mod_zone_page_state(page_zone(page), NR_MLOCK,  in clear_page_mlock()
    97  mod_zone_page_state(page_zone(page), NR_MLOCK,  in mlock_vma_page()
   185  struct zone *zone = page_zone(page);  in munlock_vma_page()
   404  if (!page || page_zone(page) != zone)  in __munlock_pagevec_fill()
   489  zone = page_zone(page);  in munlock_vma_pages_range()

memory_hotplug.c
   334  if (zone && zone != page_zone(pfn_to_page(start_pfn)))  in find_smallest_section_pfn()
   362  if (zone && zone != page_zone(pfn_to_page(pfn)))  in find_biggest_section_pfn()
   421  if (page_zone(pfn_to_page(pfn)) != zone)  in shrink_zone_span()
  1247  zone = page_zone(page);  in is_pageblock_removable_nolock()
  1300  if (zone && page_zone(page) != zone)  in test_pages_in_a_zone()
  1304  zone = page_zone(page);  in test_pages_in_a_zone()
  1614  zone = page_zone(pfn_to_page(valid_start));  in __offline_pages()

mmzone.c
    82  if (page_zone(page) != zone)  in memmap_valid_within()

internal.h
   325  __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  in mlock_migrate_page()
   327  __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);  in mlock_migrate_page()

cma.c
   113  zone = page_zone(pfn_to_page(pfn));  in cma_activate_area()
   127  if (page_zone(pfn_to_page(pfn)) != zone)  in cma_activate_area()

vmstat.c
   412  __inc_zone_state(page_zone(page), item);  in __inc_zone_page_state()
   456  __dec_zone_state(page_zone(page), item);  in __dec_zone_page_state()
   526  mod_zone_state(page_zone(page), item, 1, 1);  in inc_zone_page_state()
   532  mod_zone_state(page_zone(page), item, -1, -1);  in dec_zone_page_state()
   617  zone = page_zone(page);  in inc_zone_page_state()
  1434  if (page_zone(page) != zone)  in pagetypeinfo_showblockcount_print()

page_alloc.c
   349  return page_zone(page)->pageblock_flags;  in get_pageblock_bitmap()
   359  pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);  in pfn_to_bitidx()
   428  VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);  in set_pfnblock_flags_mask()
   481  if (zone != page_zone(page))  in page_is_consistent()
  1251  free_one_page(page_zone(page), page, pfn, order, migratetype);  in __free_pages_ok()
  1270  page_zone(page)->managed_pages += nr_pages;  in __free_pages_boot_core()
  1370  if (page_zone(start_page) != zone)  in __pageblock_pfn_to_page()
  2016  page_zone(start_page) != page_zone(end_page));  in move_freepages()
  2702  if (page_zone(page) != zone)  in mark_free_pages()
  2742  struct zone *zone = page_zone(page);  in free_unref_page_commit()
  [all …]

swap.c
    62  struct zone *zone = page_zone(page);  in __page_cache_release()
   331  struct zone *zone = page_zone(page);  in activate_page()
   472  __mod_zone_page_state(page_zone(page), NR_MLOCK,  in lru_cache_add_active_or_unevictable()

page_idle.c
    44  zone = page_zone(page);  in page_idle_get_page()

page_owner.c
   293  if (page_zone(page) != zone)  in pagetypeinfo_showmixedcount_print()
   560  if (page_zone(page) != zone)  in init_pages_in_zone()

huge_memory.c
  2416  struct zone *zone = page_zone(head);  in __split_huge_page()
  2455  spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);  in __split_huge_page()
  2655  spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);  in split_huge_page_to_list()
  2703  spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);  in split_huge_page_to_list()
  2836  if (zone != page_zone(page))  in split_huge_pages_set()

migrate.c
   470  oldzone = page_zone(page);  in migrate_page_move_mapping()
   471  newzone = page_zone(newpage);  in migrate_page_move_mapping()

memory-failure.c
   254  drain_all_pages(page_zone(p));  in shake_page()
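Two patterns recur in the mm/ hits above: accounting against a page's per-zone counters, and checking that every page in a PFN range resolves to the same struct zone (memory_hotplug.c, cma.c, page_alloc.c, page_owner.c). Below is a minimal sketch of the range-check pattern, assuming a v4.19 kernel build; pfn_range_in_single_zone() is a hypothetical helper, not a function from the tree.

/*
 * Minimal sketch, not from the tree: the zone-membership check that
 * cma_activate_area() and test_pages_in_a_zone() open-code before
 * treating a PFN range as belonging to a single zone.  The helper
 * name is hypothetical.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>

static bool pfn_range_in_single_zone(unsigned long start_pfn,
                                     unsigned long nr_pages)
{
        struct zone *zone = NULL;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                if (!pfn_valid(pfn))
                        return false;
                /* page_zone() decodes the node and zone number kept in page->flags */
                if (!zone)
                        zone = page_zone(pfn_to_page(pfn));
                else if (page_zone(pfn_to_page(pfn)) != zone)
                        return false;
        }
        return zone != NULL;
}

The "if (page_zone(...) != zone)" lines listed for memory_hotplug.c and cma.c are this same comparison, done inline.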
/Linux-v4.19/include/linux/

vmstat.h
   330  __inc_zone_state(page_zone(page), item);  in __inc_zone_page_state()
   343  __dec_zone_state(page_zone(page), item);  in __dec_zone_page_state()

migrate.h
    50  if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))  in new_page_nodemask()
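The vmstat.h, vmstat.c, and mlock.c entries show the other recurring use: page_zone() selects which zone's counters a page is accounted against. A minimal sketch assuming the v4.19 vmstat API; account_mlocked_pages() is a hypothetical wrapper, not a kernel function.

/*
 * Minimal sketch, not from the tree: route a per-zone statistics
 * update through the zone that owns the page, as mlock.c does for
 * NR_MLOCK.  The wrapper name is hypothetical.
 */
#include <linux/mm.h>
#include <linux/vmstat.h>

static void account_mlocked_pages(struct page *page, long nr_pages)
{
        /* NR_MLOCK is still a per-zone counter in v4.19 */
        mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
}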
/Linux-v4.19/arch/sh/mm/

init.c
   454  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()
/Linux-v4.19/arch/powerpc/mm/

mem.c
   158  ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);  in arch_remove_memory()
/Linux-v4.19/arch/ia64/mm/

init.c
   672  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()
/Linux-v4.19/fs/proc/

kcore.c
   173  if (!memmap_valid_within(pfn, p, page_zone(p)))  in kclist_add_private()
/Linux-v4.19/drivers/base/

memory.c
   432  strcat(buf, page_zone(pfn_to_page(start_pfn))->name);  in show_valid_zones()
/Linux-v4.19/arch/x86/mm/

init_32.c
   870  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()

init_64.c
  1162  zone = page_zone(page);  in arch_remove_memory()
/Linux-v4.19/kernel/power/

snapshot.c
  1218  if (page_zone(page) != zone)  in saveable_highmem_page()
  1280  if (page_zone(page) != zone)  in saveable_page()
/Linux-v4.19/kernel/

fork.c
   350  mod_zone_page_state(page_zone(vm->pages[i]),  in account_kernel_stack()
   365  mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,  in account_kernel_stack()