Searched refs:PAGES_PER_SECTION (Results 1 – 17 of 17) sorted by relevance
/Linux-v5.4/arch/x86/mm/

| File | Line | Code | Function |
|------|------|------|----------|
| numa_32.c | 54 | start = round_down(start, PAGES_PER_SECTION); | memory_present() |
| numa_32.c | 55 | end = round_up(end, PAGES_PER_SECTION); | memory_present() |
| numa_32.c | 56 | for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { | memory_present() |
| numa_32.c | 57 | physnode_map[pfn / PAGES_PER_SECTION] = nid; | memory_present() |
| numa.c | 559 | if (pfn_align && pfn_align < PAGES_PER_SECTION) { | numa_register_memblks() |
| numa.c | 562 | PFN_PHYS(PAGES_PER_SECTION) >> 20); | numa_register_memblks() |
| init_64.c | 1521 | if (end - start < PAGES_PER_SECTION * sizeof(struct page)) | vmemmap_populate() |
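
The x86 32-bit NUMA hits above show the usual pattern: a node's PFN range is widened to whole sections with round_down()/round_up(), then one node id is recorded per section. Below is a minimal standalone sketch of that bookkeeping, with an assumed section size of 2^14 pages and a hypothetical memory_present_demo() helper; neither value nor name is taken from a real kernel configuration.

```c
/* Hypothetical standalone sketch -- not kernel code.  It mimics how
 * memory_present() in arch/x86/mm/numa_32.c widens a PFN range to
 * whole sections and records one node id per section. */
#include <stdio.h>

#define PAGES_PER_SECTION  (1UL << 14)   /* assumed: 16384 pages per section */
#define MAX_SECTIONS       64            /* assumed map size for the demo    */

static unsigned char physnode_map[MAX_SECTIONS];

static unsigned long round_down_pow2(unsigned long x, unsigned long a)
{
    return x & ~(a - 1);                 /* a must be a power of two */
}

static unsigned long round_up_pow2(unsigned long x, unsigned long a)
{
    return (x + a - 1) & ~(a - 1);
}

static void memory_present_demo(int nid, unsigned long start, unsigned long end)
{
    unsigned long pfn;

    start = round_down_pow2(start, PAGES_PER_SECTION);
    end = round_up_pow2(end, PAGES_PER_SECTION);
    for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
        physnode_map[pfn / PAGES_PER_SECTION] = nid;
}

int main(void)
{
    /* A range that starts and ends mid-section still marks whole sections. */
    memory_present_demo(1, 20000, 50000);
    printf("sections 1..3 -> node %d %d %d\n",
           physnode_map[1], physnode_map[2], physnode_map[3]);
    return 0;
}
```

With these assumed numbers the range [20000, 50000) overlaps sections 1 through 3 once it is widened to section boundaries, so all three map entries end up pointing at node 1.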
/Linux-v5.4/mm/

| File | Line | Code | Function |
|------|------|------|----------|
| page_ext.c | 232 | table_size = page_ext_size * PAGES_PER_SECTION; | init_section_page_ext() |
| page_ext.c | 265 | table_size = page_ext_size * PAGES_PER_SECTION; | free_page_ext() |
| page_ext.c | 306 | for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { | online_page_ext() |
| page_ext.c | 315 | for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) | online_page_ext() |
| page_ext.c | 329 | for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) | offline_page_ext() |
| page_ext.c | 385 | pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) { | page_ext_init() |
| sparse.c | 243 | pfns = min(nr_pages, PAGES_PER_SECTION | subsection_map_init() |
| sparse.c | 277 | for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { | memory_present() |
| sparse.c | 442 | return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE); | section_map_size() |
| sparse.c | 448 | return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); | section_map_size() |
| sparse.c | 548 | map = __populate_section_memmap(pfn, PAGES_PER_SECTION, | sparse_init_nid() |
| sparse.c | 613 | for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { | online_mem_sections() |
| sparse.c | 632 | for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { | offline_mem_sections() |
| sparse.c | 667 | unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); | free_map_bootmem() |
| sparse.c | 676 | unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; | populate_section_memmap() |
| sparse.c | 703 | get_order(sizeof(struct page) * PAGES_PER_SECTION)); | depopulate_section_memmap() |
| sparse.c | | [all …] | |
| memory_hotplug.c | 184 | mapsize = sizeof(struct page) * PAGES_PER_SECTION; | register_page_bootmem_info_section() |
| memory_hotplug.c | 213 | register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); | register_page_bootmem_info_section() |
| memory_hotplug.c | 241 | for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { | register_page_bootmem_info_node() |
| memory_hotplug.c | 271 | min_align = PAGES_PER_SECTION; | check_pfn_span() |
| memory_hotplug.c | 315 | pfns = min(nr_pages, PAGES_PER_SECTION | __add_pages() |
| memory_hotplug.c | 534 | pfns = min(nr_pages, PAGES_PER_SECTION | __remove_pages() |
| memory_hotplug.c | 1219 | pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { | test_pages_in_a_zone() |
| page_alloc.c | 427 | if ((nr_initialised > PAGES_PER_SECTION) && | defer_init() |
| page_alloc.c | 428 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { | defer_init() |
| page_alloc.c | 462 | pfn &= (PAGES_PER_SECTION-1); | pfn_to_bitidx() |
| page_alloc.c | 1852 | unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); | deferred_grow_zone() |
| page_alloc.c | 1906 | if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) | deferred_grow_zone() |
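
Several of the mm/ hits boil down to the same arithmetic: a per-section table (the struct page memmap in sparse.c, the page_ext table in page_ext.c) is sized as an entry size times PAGES_PER_SECTION, and page_alloc.c reduces a pfn to its offset within a section by masking with PAGES_PER_SECTION - 1. A small sketch of those calculations, using assumed values (64-byte struct page, 2^15 pages per section, 8-byte page_ext entry) rather than anything derived from a real config:

```c
/* Hypothetical sketch of the per-section sizing arithmetic seen in
 * mm/sparse.c, mm/page_ext.c and mm/page_alloc.c.  All sizes are
 * assumptions for illustration only. */
#include <stdio.h>

#define PAGES_PER_SECTION   (1UL << 15)   /* assumed */
#define SIZEOF_STRUCT_PAGE  64UL          /* assumed */
#define PAGE_EXT_SIZE       8UL           /* assumed per-page extension size */

int main(void)
{
    /* sparse.c: one memmap chunk covers exactly one section */
    unsigned long memmap_size = SIZEOF_STRUCT_PAGE * PAGES_PER_SECTION;

    /* page_ext.c: the extension table is sized the same way */
    unsigned long table_size = PAGE_EXT_SIZE * PAGES_PER_SECTION;

    /* page_alloc.c: a pfn is reduced to its offset inside the section */
    unsigned long pfn = 100000;
    unsigned long bitidx = pfn & (PAGES_PER_SECTION - 1);

    printf("memmap per section: %lu KiB\n", memmap_size >> 10);
    printf("page_ext per section: %lu KiB\n", table_size >> 10);
    printf("pfn %lu is page %lu within its section\n", pfn, bitidx);
    return 0;
}
```

With these assumed numbers one section's memmap comes to 2 MiB, which lines up with the PMD_SIZE alignment visible in section_map_size() at sparse.c line 442.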
/Linux-v5.4/arch/x86/include/asm/

| File | Line | Code | Function |
|------|------|------|----------|
| mmzone_32.h | 30 | #define PAGES_PER_SECTION (MAX_NR_PAGES/MAX_SECTIONS) | macro |
| mmzone_32.h | 37 | return((int) physnode_map[(pfn) / PAGES_PER_SECTION]); | pfn_to_nid() |
/Linux-v5.4/drivers/base/

| File | Line | Code | Function |
|------|------|------|----------|
| memory.c | 136 | ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); | removable_show() |
| memory.c | 208 | section_nr, pfn, pfn + PAGES_PER_SECTION); | pages_correctly_probed() |
| memory.c | 212 | section_nr, pfn, pfn + PAGES_PER_SECTION); | pages_correctly_probed() |
| memory.c | 216 | section_nr, pfn, pfn + PAGES_PER_SECTION); | pages_correctly_probed() |
| memory.c | 219 | pfn += PAGES_PER_SECTION; | pages_correctly_probed() |
| memory.c | 234 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; | memory_block_action() |
| memory.c | 400 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; | valid_zones_show() |
| memory.c | 495 | unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block; | probe_store() |
| node.c | 779 | pfn = round_down(pfn + PAGES_PER_SECTION, | register_mem_sect_under_node() |
| node.c | 780 | PAGES_PER_SECTION) - 1; | register_mem_sect_under_node() |
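
In drivers/base, a memory block groups sections_per_block sections, so the sizes exposed through sysfs are multiples of PAGES_PER_SECTION, and node.c jumps to the last pfn of the current section when linking memory blocks to nodes. A sketch of both calculations with assumed values (2^15 pages per section, 4 sections per block); this is illustrative standalone code, not the driver's:

```c
/* Hypothetical sketch of the block arithmetic used by
 * drivers/base/memory.c and drivers/base/node.c. */
#include <stdio.h>

#define PAGES_PER_SECTION (1UL << 15)   /* assumed */

int main(void)
{
    unsigned long sections_per_block = 4;   /* assumed, arch/size dependent */

    /* memory.c: one memory block spans this many pages */
    unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;

    /* node.c: round_down(pfn + PAGES_PER_SECTION, PAGES_PER_SECTION) - 1
     * lands on the last pfn of the section that contains 'pfn'. */
    unsigned long pfn = 100000;
    unsigned long last = ((pfn + PAGES_PER_SECTION) & ~(PAGES_PER_SECTION - 1)) - 1;

    printf("memory block: %lu pages\n", nr_pages);
    printf("last pfn of the section holding %lu: %lu\n", pfn, last);
    return 0;
}
```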
/Linux-v5.4/arch/arm64/mm/

| File | Line | Code | Function |
|------|------|------|----------|
| init.c | 504 | start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); | free_unused_memmap() |
| init.c | 523 | if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) | free_unused_memmap() |
| init.c | 524 | free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); | free_unused_memmap() |
/Linux-v5.4/arch/arm/mm/

| File | Line | Code | Function |
|------|------|------|----------|
| init.c | 374 | ALIGN(prev_end, PAGES_PER_SECTION)); | free_unused_memmap() |
| init.c | 400 | if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) | free_unused_memmap() |
| init.c | 402 | ALIGN(prev_end, PAGES_PER_SECTION)); | free_unused_memmap() |
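
The arm and arm64 hits both come from free_unused_memmap(), which releases the struct page array covering holes between memory banks. The clamp visible at arm64 init.c line 504 keeps the free from running past the first section boundary above the previous bank's end: with SPARSEMEM, sections that are entirely inside a hole were never marked present, so no memmap was allocated for them in the first place. A rough sketch of that clamp, with an assumed section size and a made-up bank layout:

```c
/* Hypothetical sketch of the section clamp in free_unused_memmap(). */
#include <stdio.h>

#define PAGES_PER_SECTION (1UL << 15)   /* assumed */

/* Power-of-two round up, like the kernel's ALIGN(). */
static unsigned long align_up(unsigned long x, unsigned long a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    unsigned long prev_end = 70000;    /* pfn just past bank 0 (made up) */
    unsigned long start = 200000;      /* first pfn of bank 1 (made up)  */

    /* Clamp: memmap beyond the section holding prev_end was never
     * allocated for the absent sections, so it cannot be freed. */
    unsigned long free_end = align_up(prev_end, PAGES_PER_SECTION);
    if (free_end > start)
        free_end = start;

    if (prev_end < free_end)
        printf("memmap for pfns [%lu, %lu) can be freed\n",
               prev_end, free_end);
    return 0;
}
```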
/Linux-v5.4/include/linux/

| File | Line | Code | Function |
|------|------|------|----------|
| memory.h | 123 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) | |
| mmzone.h | 1138 | #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT) | macro |
| mmzone.h | 1139 | #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1)) | |
| mmzone.h | 1157 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) | |
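
The include/linux hits are the definitions everything above builds on: mmzone.h derives PAGES_PER_SECTION from PFN_SECTION_SHIFT and provides the section mask and alignment helper, and memory.h turns the section size into a byte granularity for memory blocks. A compile-and-run sketch of that arithmetic with assumed shifts (PAGE_SHIFT 12, PFN_SECTION_SHIFT 15, i.e. a 128 MiB section); the shift values are illustrative, not read from a real config:

```c
#include <stdio.h>

/* Assumed shifts for the demo; real values depend on the architecture. */
#define PAGE_SHIFT         12
#define PFN_SECTION_SHIFT  15

/* Re-created from the mmzone.h lines above. */
#define PAGES_PER_SECTION  (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK  (~(PAGES_PER_SECTION - 1))
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)

/* Re-created from the memory.h line above: bytes covered by one section. */
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION << PAGE_SHIFT)

int main(void)
{
    unsigned long pfn = 100000;

    printf("PAGES_PER_SECTION = %lu pages, block size = %lu MiB\n",
           PAGES_PER_SECTION, (unsigned long)CONFIG_MEM_BLOCK_SIZE >> 20);
    printf("pfn %lu masked to section start: %lu, aligned up: %lu\n",
           pfn, pfn & PAGE_SECTION_MASK, SECTION_ALIGN_UP(pfn));
    return 0;
}
```

With these shifts, pfn 100000 masks down to 98304 and aligns up to 131072, i.e. the start of its own section and the start of the next one.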
/Linux-v5.4/drivers/xen/

| File | Line | Code | Function |
|------|------|------|----------|
| balloon.c | 263 | PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); | additional_memory_resource() |
| balloon.c | 304 | balloon_hotplug = round_up(credit, PAGES_PER_SECTION); | reserve_additional_memory() |
/Linux-v5.4/arch/powerpc/mm/

| File | Line | Code | Function |
|------|------|------|----------|
| init_64.c | 96 | for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page))) | vmemmap_populated() |
/Linux-v5.4/arch/powerpc/platforms/pseries/

| File | Line | Code | Function |
|------|------|------|----------|
| hotplug-memory.c | 366 | rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION); | lmb_is_removable() |