/Linux-v4.19/arch/x86/mm/

init.c
  260  unsigned long start_pfn, unsigned long end_pfn,  in save_mr() argument
  263  if (start_pfn < end_pfn) {  in save_mr()
  267  mr[nr_range].end = end_pfn<<PAGE_SHIFT;  in save_mr()
  338  unsigned long start_pfn, end_pfn, limit_pfn;  in split_mem_range() local
  354  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
  356  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
  358  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
  360  if (end_pfn > limit_pfn)  in split_mem_range()
  361  end_pfn = limit_pfn;  in split_mem_range()
  362  if (start_pfn < end_pfn) {  in split_mem_range()
  [all …]

/Linux-v4.19/arch/mips/loongson64/loongson-3/

numa.c
  128  u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;  in szmem() local
  144  end_pfn = start_pfn + node_psize;  in szmem()
  149  start_pfn, end_pfn, num_physpages);  in szmem()
  153  PFN_PHYS(end_pfn - start_pfn), node);  in szmem()
  158  end_pfn = start_pfn + node_psize;  in szmem()
  163  start_pfn, end_pfn, num_physpages);  in szmem()
  167  PFN_PHYS(end_pfn - start_pfn), node);  in szmem()
  185  unsigned long start_pfn, end_pfn, freepfn;  in node_mem_init() local
  191  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in node_mem_init()
  196  node, start_pfn, end_pfn, freepfn);  in node_mem_init()
  [all …]

/Linux-v4.19/include/trace/events/

page_isolation.h
  14  unsigned long end_pfn,
  17  TP_ARGS(start_pfn, end_pfn, fin_pfn),
  21  __field(unsigned long, end_pfn)
  27  __entry->end_pfn = end_pfn;
  32  __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
  33  __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")

compaction.h
  18  unsigned long end_pfn,
  22  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
  26  __field(unsigned long, end_pfn)
  33  __entry->end_pfn = end_pfn;
  40  __entry->end_pfn,
  49  unsigned long end_pfn,
  53  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
  60  unsigned long end_pfn,
  64  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)

/Linux-v4.19/mm/

page_isolation.c
  187  int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,  in start_isolate_page_range() argument
  195  BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));  in start_isolate_page_range()
  198  pfn < end_pfn;  in start_isolate_page_range()
  224  int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,  in undo_isolate_page_range() argument
  231  BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));  in undo_isolate_page_range()
  234  pfn < end_pfn;  in undo_isolate_page_range()
  251  __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,  in __test_page_isolated_in_pageblock() argument
  256  while (pfn < end_pfn) {  in __test_page_isolated_in_pageblock()
  280  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,  in test_pages_isolated() argument
  292  for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {  in test_pages_isolated()
  [all …]

page_idle.c
  127  unsigned long pfn, end_pfn;  in page_idle_bitmap_read() local
  137  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_read()
  138  if (end_pfn > max_pfn)  in page_idle_bitmap_read()
  139  end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);  in page_idle_bitmap_read()
  141  for (; pfn < end_pfn; pfn++) {  in page_idle_bitmap_read()
  172  unsigned long pfn, end_pfn;  in page_idle_bitmap_write() local
  182  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_write()
  183  if (end_pfn > max_pfn)  in page_idle_bitmap_write()
  184  end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);  in page_idle_bitmap_write()
  186  for (; pfn < end_pfn; pfn++) {  in page_idle_bitmap_write()

memory_hotplug.c
  222  unsigned long i, pfn, end_pfn, nr_pages;  in register_page_bootmem_info_node() local
  233  end_pfn = pgdat_end_pfn(pgdat);  in register_page_bootmem_info_node()
  236  for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {  in register_page_bootmem_info_node()
  321  unsigned long end_pfn)  in find_smallest_section_pfn() argument
  325  for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {  in find_smallest_section_pfn()
  346  unsigned long end_pfn)  in find_biggest_section_pfn() argument
  352  pfn = end_pfn - 1;  in find_biggest_section_pfn()
  372  unsigned long end_pfn)  in shrink_zone_span() argument
  389  pfn = find_smallest_section_pfn(nid, zone, end_pfn,  in shrink_zone_span()
  395  } else if (zone_end_pfn == end_pfn) {  in shrink_zone_span()
  [all …]

compaction.c
  247  unsigned long end_pfn = zone_end_pfn(zone);  in __reset_isolation_suitable() local
  253  for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {  in __reset_isolation_suitable()
  435  unsigned long end_pfn,  in isolate_freepages_block() argument
  449  for (; blockpfn < end_pfn; blockpfn++, cursor++) {  in isolate_freepages_block()
  550  if (unlikely(blockpfn > end_pfn))  in isolate_freepages_block()
  551  blockpfn = end_pfn;  in isolate_freepages_block()
  564  if (strict && blockpfn < end_pfn)  in isolate_freepages_block()
  568  if (blockpfn == end_pfn)  in isolate_freepages_block()
  593  unsigned long start_pfn, unsigned long end_pfn)  in isolate_freepages_range() argument
  604  for (; pfn < end_pfn; pfn += isolated,  in isolate_freepages_range()
  [all …]

internal.h
  152  unsigned long end_pfn, struct zone *zone);
  155  unsigned long end_pfn, struct zone *zone)  in pageblock_pfn_to_page() argument
  160  return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);  in pageblock_pfn_to_page()
  211  unsigned long start_pfn, unsigned long end_pfn);
  214  unsigned long low_pfn, unsigned long end_pfn);
  434  unsigned long *end_pfn);
  437  unsigned long *end_pfn)  in mminit_validate_memmodel_limits() argument

sparse.c
  143  unsigned long *end_pfn)  in mminit_validate_memmodel_limits() argument
  154  *start_pfn, *end_pfn, max_sparsemem_pfn);  in mminit_validate_memmodel_limits()
  157  *end_pfn = max_sparsemem_pfn;  in mminit_validate_memmodel_limits()
  158  } else if (*end_pfn > max_sparsemem_pfn) {  in mminit_validate_memmodel_limits()
  161  *start_pfn, *end_pfn, max_sparsemem_pfn);  in mminit_validate_memmodel_limits()
  163  *end_pfn = max_sparsemem_pfn;  in mminit_validate_memmodel_limits()
  524  void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)  in online_mem_sections() argument
  528  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {  in online_mem_sections()
  543  void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)  in offline_mem_sections() argument
  547  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {  in offline_mem_sections()

page_alloc.c
  1223  unsigned long end_pfn = PFN_UP(end);  in reserve_bootmem_region() local
  1225  for (; start_pfn < end_pfn; start_pfn++) {  in reserve_bootmem_region()
  1355  unsigned long end_pfn, struct zone *zone)  in __pageblock_pfn_to_page() argument
  1361  end_pfn--;  in __pageblock_pfn_to_page()
  1363  if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))  in __pageblock_pfn_to_page()
  1373  end_page = pfn_to_page(end_pfn);  in __pageblock_pfn_to_page()
  1477  unsigned long end_pfn)  in deferred_free_pages() argument
  1483  for (; pfn < end_pfn; pfn++) {  in deferred_free_pages()
  1506  unsigned long end_pfn)  in deferred_init_pages() argument
  1513  for (; pfn < end_pfn; pfn++) {  in deferred_init_pages()
  [all …]

/Linux-v4.19/arch/powerpc/platforms/powernv/

memtrace.c
  75  u64 end_pfn = start_pfn + nr_pages - 1;  in memtrace_offline_pages() local
  77  if (walk_memory_range(start_pfn, end_pfn, NULL,  in memtrace_offline_pages()
  81  walk_memory_range(start_pfn, end_pfn, (void *)MEM_GOING_OFFLINE,  in memtrace_offline_pages()
  85  walk_memory_range(start_pfn, end_pfn, (void *)MEM_ONLINE,  in memtrace_offline_pages()
  90  walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,  in memtrace_offline_pages()
  102  u64 start_pfn, end_pfn, nr_pages;  in memtrace_alloc_node() local
  109  end_pfn = node_end_pfn(nid);  in memtrace_alloc_node()
  113  end_pfn = round_down(end_pfn - nr_pages, nr_pages);  in memtrace_alloc_node()
  115  for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {  in memtrace_alloc_node()

/Linux-v4.19/arch/unicore32/mm/

init.c
  88  unsigned long end_pfn)  in uc32_bootmem_init() argument
  99  boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);  in uc32_bootmem_init()
  101  __pfn_to_phys(end_pfn));  in uc32_bootmem_init()
  109  init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);  in uc32_bootmem_init()
  116  if (end >= end_pfn)  in uc32_bootmem_init()
  117  end = end_pfn;  in uc32_bootmem_init()
  129  if (end >= end_pfn)  in uc32_bootmem_init()
  130  end = end_pfn;  in uc32_bootmem_init()
  273  free_memmap(unsigned long start_pfn, unsigned long end_pfn)  in free_memmap() argument
  282  end_pg = pfn_to_page(end_pfn);  in free_memmap()

/Linux-v4.19/arch/sparc/mm/

init_32.c
  69  unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in calc_highpages() local
  71  if (end_pfn <= max_low_pfn)  in calc_highpages()
  77  nr += end_pfn - start_pfn;  in calc_highpages()
  235  static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)  in map_high_region() argument
  240  printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);  in map_high_region()
  243  for (tmp = start_pfn; tmp < end_pfn; tmp++)  in map_high_region()
  284  unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in mem_init() local
  286  if (end_pfn <= highstart_pfn)  in mem_init()
  292  map_high_region(start_pfn, end_pfn);  in mem_init()

/Linux-v4.19/arch/sh/mm/

numa.c
  28  unsigned long start_pfn, end_pfn;  in setup_bootmem_node() local
  34  end_pfn = PFN_DOWN(end);  in setup_bootmem_node()
  41  __add_active_range(nid, start_pfn, end_pfn);  in setup_bootmem_node()
  49  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_bootmem_node()

init.c
  195  unsigned long start_pfn, end_pfn;  in allocate_pgdat() local
  200  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in allocate_pgdat()
  204  SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);  in allocate_pgdat()
  217  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in allocate_pgdat()
  226  unsigned long start_pfn, end_pfn;  in do_init_bootmem() local
  228  end_pfn = memblock_region_memory_end_pfn(reg);  in do_init_bootmem()
  229  __add_active_range(0, start_pfn, end_pfn);  in do_init_bootmem()

/Linux-v4.19/arch/s390/numa/

numa.c
  111  unsigned long start_pfn, end_pfn;  in numa_setup_memory() local
  116  end_pfn = 0;  in numa_setup_memory()
  120  if (t_end > end_pfn)  in numa_setup_memory()
  121  end_pfn = t_end;  in numa_setup_memory()
  123  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in numa_setup_memory()

/Linux-v4.19/include/linux/

node.h
  36  unsigned long end_pfn);
  39  unsigned long end_pfn)  in link_mem_sections() argument
  58  unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;  in register_one_node() local
  64  error = link_mem_sections(nid, start_pfn, end_pfn);  in register_one_node()

page-isolation.h
  49  start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  57  undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  63  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,

/Linux-v4.19/arch/mips/sgi-ip27/

ip27-memory.c
  393  unsigned long start_pfn, end_pfn;  in node_mem_init() local
  395  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in node_mem_init()
  405  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;  in node_mem_init()
  413  start_pfn, end_pfn);  in node_mem_init()
  414  free_bootmem_with_active_regions(node, end_pfn);  in node_mem_init()
  467  unsigned long start_pfn, end_pfn;  in paging_init() local
  469  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in paging_init()
  471  if (end_pfn > max_low_pfn)  in paging_init()
  472  max_low_pfn = end_pfn;  in paging_init()

/Linux-v4.19/arch/x86/xen/

setup.c
  254  unsigned long end_pfn, unsigned long nr_pages)  in xen_set_identity_and_release_chunk() argument
  259  WARN_ON(start_pfn > end_pfn);  in xen_set_identity_and_release_chunk()
  262  end = min(end_pfn, nr_pages);  in xen_set_identity_and_release_chunk()
  281  set_phys_range_identity(start_pfn, end_pfn);  in xen_set_identity_and_release_chunk()
  387  unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,  in xen_set_identity_and_remap_chunk() argument
  392  unsigned long n = end_pfn - start_pfn;  in xen_set_identity_and_remap_chunk()
  434  for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)  in xen_set_identity_and_remap_chunk()
  443  unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,  in xen_count_remap_pages() argument
  449  return remap_pages + min(end_pfn, nr_pages) - start_pfn;  in xen_count_remap_pages()
  453  unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,  in xen_foreach_remap_area() argument
  [all …]

/Linux-v4.19/arch/arm64/mm/

numa.c
  230  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
  237  if (start_pfn >= end_pfn)  in setup_node_data()
  254  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_node_data()
  374  unsigned long start_pfn, end_pfn;  in numa_register_nodes() local
  376  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in numa_register_nodes()
  377  setup_node_data(nid, start_pfn, end_pfn);  in numa_register_nodes()

/Linux-v4.19/arch/sh/kernel/

swsusp.c
  25  unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;  in pfn_is_nosave() local
  27  return (pfn >= begin_pfn) && (pfn < end_pfn);  in pfn_is_nosave()

setup.c
  196  unsigned long end_pfn)  in __add_active_range() argument
  204  end = end_pfn << PAGE_SHIFT;  in __add_active_range()
  213  start_pfn, end_pfn);  in __add_active_range()
  237  memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  in __add_active_range()

/Linux-v4.19/drivers/hv/

hv_balloon.c
  446  unsigned long end_pfn;  member
  456  unsigned long end_pfn;  member
  593  if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))  in has_pfn_is_backed()
  615  (pfn < has->end_pfn) &&  in hv_page_offline_check()
  783  if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))  in hv_online_page()
  806  if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)  in pfn_covered()
  822  gap->end_pfn = start_pfn;  in pfn_covered()
  832  if ((start_pfn + pfn_cnt) > has->end_pfn) {  in pfn_covered()
  833  residual = (start_pfn + pfn_cnt - has->end_pfn);  in pfn_covered()
  841  has->end_pfn += new_inc;  in pfn_covered()
  [all …]