Lines matching refs: start_pfn

163 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument
169 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section()
197 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument
203 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section()
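The two register_page_bootmem_info_section() definitions above are build-dependent variants of the same helper (most likely split by CONFIG_SPARSEMEM_VMEMMAP); both begin by turning the section's first pfn into a section number. A minimal userspace model of that arithmetic, assuming typical x86_64 sparsemem constants (SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12, hence PAGES_PER_SECTION = 32768) — other architectures define these differently:

    /*
     * Userspace model of the pfn <-> section-number conversion used by
     * register_page_bootmem_info_section().  The constants below are an
     * assumption for a typical x86_64 sparsemem build.
     */
    #include <stdio.h>

    #define SECTION_SIZE_BITS 27
    #define PAGE_SHIFT        12
    #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
    #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

    static unsigned long pfn_to_section_nr(unsigned long pfn)
    {
            return pfn >> PFN_SECTION_SHIFT;
    }

    static unsigned long section_nr_to_pfn(unsigned long sec_nr)
    {
            return sec_nr << PFN_SECTION_SHIFT;
    }

    int main(void)
    {
            unsigned long start_pfn = 0x100000UL;   /* 4 GiB with 4 KiB pages */
            unsigned long sec = pfn_to_section_nr(start_pfn);

            printf("pfn %#lx -> section %lu (first pfn %#lx)\n",
                   start_pfn, sec, section_nr_to_pfn(sec));
            return 0;
    }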
320 unsigned long start_pfn, in find_smallest_section_pfn() argument
325 for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) { in find_smallest_section_pfn()
326 ms = __pfn_to_section(start_pfn); in find_smallest_section_pfn()
331 if (unlikely(pfn_to_nid(start_pfn) != nid)) in find_smallest_section_pfn()
334 if (zone && zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
337 return start_pfn; in find_smallest_section_pfn()
345 unsigned long start_pfn, in find_biggest_section_pfn() argument
353 for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) { in find_biggest_section_pfn()
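find_smallest_section_pfn() and find_biggest_section_pfn() are mirror images: both step through a pfn range in PAGES_PER_SECTION strides and return the first pfn whose section survives the valid/online, node, and zone checks, or 0 if none does. A sketch of that shared pattern, reusing PAGES_PER_SECTION from the sketch above, with the hypothetical pfn_usable() standing in for the kernel's per-section checks:

    /*
     * Shared scan pattern of find_smallest_section_pfn() and
     * find_biggest_section_pfn().  0 is the "nothing found" sentinel,
     * as in the kernel.
     */
    typedef int (*pfn_pred_t)(unsigned long pfn);

    static unsigned long find_smallest(unsigned long start_pfn,
                                       unsigned long end_pfn,
                                       pfn_pred_t pfn_usable)
    {
            for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION)
                    if (pfn_usable(start_pfn))
                            return start_pfn;
            return 0;
    }

    static unsigned long find_biggest(unsigned long start_pfn,
                                      unsigned long end_pfn,
                                      pfn_pred_t pfn_usable)
    {
            unsigned long pfn;

            /* walk downward; "pfn < end_pfn" also stops an unsigned
             * wraparound below zero when start_pfn == 0 */
            for (pfn = end_pfn - 1; pfn >= start_pfn && pfn < end_pfn;
                 pfn -= PAGES_PER_SECTION)
                    if (pfn_usable(pfn))
                            return pfn;
            return 0;
    }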
371 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
382 if (zone_start_pfn == start_pfn) { in shrink_zone_span()
403 start_pfn); in shrink_zone_span()
425 if (start_pfn == pfn) in shrink_zone_span()
440 unsigned long start_pfn, unsigned long end_pfn) in shrink_pgdat_span() argument
449 if (pgdat_start_pfn == start_pfn) { in shrink_pgdat_span()
470 start_pfn); in shrink_pgdat_span()
493 if (start_pfn == pfn) in shrink_pgdat_span()
505 static void __remove_zone(struct zone *zone, unsigned long start_pfn) in __remove_zone() argument
512 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in __remove_zone()
513 shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages); in __remove_zone()
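From the lines above, __remove_zone() shrinks first the zone span and then the node span over one section's worth of pages. In both shrink helpers a boundary only moves when the removed range touches it: if the range begins the span, the start advances to the next usable section; if it ends the span, the end retreats to the previous one. A compact model on a hypothetical struct span standing in for the zone/pgdat fields (the kernel additionally rescans the remaining span to detect a now-empty zone or node, omitted here):

    /*
     * Model of the shrink logic shared by shrink_zone_span() and
     * shrink_pgdat_span(), as driven from __remove_zone().
     */
    struct span {
            unsigned long start_pfn;
            unsigned long nr_pages;
    };

    static void shrink_span(struct span *s, unsigned long start_pfn,
                            unsigned long end_pfn, pfn_pred_t usable)
    {
            unsigned long span_end = s->start_pfn + s->nr_pages;
            unsigned long pfn;

            if (s->start_pfn == start_pfn) {
                    /* removed range begins the span: push the start up */
                    pfn = find_smallest(end_pfn, span_end, usable);
                    s->start_pfn = pfn;
                    s->nr_pages = pfn ? span_end - pfn : 0;
            } else if (span_end == end_pfn) {
                    /* removed range ends the span: pull the end down to
                     * the last pfn of the last usable section */
                    pfn = find_biggest(s->start_pfn, start_pfn, usable);
                    s->nr_pages = pfn ? pfn + 1 - s->start_pfn : 0;
            }
            /* a hole in the middle leaves both boundaries alone */
    }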
520 unsigned long start_pfn; in __remove_section() local
532 start_pfn = section_nr_to_pfn((unsigned long)scn_nr); in __remove_section()
533 __remove_zone(zone, start_pfn); in __remove_section()
665 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, in online_pages_range() argument
672 if (PageReserved(pfn_to_page(start_pfn))) in online_pages_range()
674 page = pfn_to_page(start_pfn + i); in online_pages_range()
679 online_mem_sections(start_pfn, start_pfn + nr_pages); in online_pages_range()
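The online_pages_range() lines suggest the pages are published only when the range's first page is still marked reserved (freshly hot-added memory), after which every section covering [start_pfn, start_pfn + nr_pages) is flipped online. The kernel records that flag in each mem_section; a plain bitmap is a fair stand-in:

    /*
     * Toy model of online_mem_sections(): set one "online" bit per
     * section overlapping [start_pfn, end_pfn).  Assumes every
     * section number stays below MAX_SECTIONS.
     */
    #include <limits.h>

    #define MAX_SECTIONS  1024
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long section_online[MAX_SECTIONS / BITS_PER_LONG + 1];

    static void online_sections(unsigned long start_pfn, unsigned long end_pfn)
    {
            unsigned long pfn, nr;

            for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                    nr = pfn_to_section_nr(pfn);
                    section_online[nr / BITS_PER_LONG] |=
                            1UL << (nr % BITS_PER_LONG);
            }
    }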
759 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, in resize_zone_range() argument
764 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
765 zone->zone_start_pfn = start_pfn; in resize_zone_range()
767 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
770 static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn, in resize_pgdat_range() argument
775 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) in resize_pgdat_range()
776 pgdat->node_start_pfn = start_pfn; in resize_pgdat_range()
778 pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn; in resize_pgdat_range()
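resize_zone_range() and resize_pgdat_range() are the same computation on different structures: remember the old end, lower the start if the new range begins earlier (or the span is empty), and set the span to whichever end is larger minus the possibly-new start. Reusing struct span from the shrink sketch, and assuming an empty span is {0, 0} as freshly initialised zones and nodes are:

    /*
     * The grow-span arithmetic behind resize_zone_range() and
     * resize_pgdat_range().
     */
    static void resize_span(struct span *s, unsigned long start_pfn,
                            unsigned long nr_pages)
    {
            unsigned long old_end = s->start_pfn + s->nr_pages;
            unsigned long new_end = start_pfn + nr_pages;

            if (s->nr_pages == 0 || start_pfn < s->start_pfn)
                    s->start_pfn = start_pfn;

            s->nr_pages = (new_end > old_end ? new_end : old_end) - s->start_pfn;
    }

For example, onlining [0x20000, 0x28000) into a span of [0x10000, 0x20000) keeps start_pfn at 0x10000 and grows nr_pages to 0x18000.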
781 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, in move_pfn_range_to_zone() argument
789 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
796 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
798 resize_pgdat_range(pgdat, start_pfn, nr_pages); in move_pfn_range_to_zone()
807 memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, in move_pfn_range_to_zone()
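move_pfn_range_to_zone() stitches those pieces together: initialise the zone if it is still empty, grow the zone span, grow the node span, and only then run memmap_init_zone() so each struct page in the range is linked to its new zone and node. A sketch of that ordering in terms of the helpers modelled above (locking elided; the real code holds the pgdat resize lock, with the zone span seqlock around the zone resize):

    /*
     * Ordering inside move_pfn_range_to_zone(), on the struct span
     * model.
     */
    static void move_range_to_zone(struct span *zone_span, struct span *node_span,
                                   unsigned long start_pfn, unsigned long nr_pages)
    {
            /* 1. init_currently_empty_zone() runs first when the zone
             *    has no pages yet (folded into resize_span() here) */
            resize_span(zone_span, start_pfn, nr_pages);

            /* 2. the containing node's span grows second */
            resize_span(node_span, start_pfn, nr_pages);

            /* 3. memmap_init_zone() then links every struct page in
             *    [start_pfn, start_pfn + nr_pages) to zone and node */
    }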
818 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, in default_kernel_zone_for_pfn() argument
827 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
834 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, in default_zone_for_pfn() argument
837 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, in default_zone_for_pfn()
840 bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages); in default_zone_for_pfn()
841 bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages); in default_zone_for_pfn()
858 struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, in zone_for_pfn_range() argument
862 return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages); in zone_for_pfn_range()
867 return default_zone_for_pfn(nid, start_pfn, nr_pages); in zone_for_pfn_range()
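Zone selection for onlining boils down to interval intersection: default_kernel_zone_for_pfn() returns the first kernel zone whose span intersects the new range, and default_zone_for_pfn() compares that candidate against ZONE_MOVABLE to pick a default. The test mirrors zone_intersects() on half-open pfn intervals:

    /*
     * Half-open interval test behind the zone selection above, on
     * the struct span model: true when [start_pfn, start_pfn +
     * nr_pages) overlaps the span.
     */
    static int span_intersects(const struct span *s, unsigned long start_pfn,
                               unsigned long nr_pages)
    {
            if (s->nr_pages == 0)
                    return 0;
            return start_pfn < s->start_pfn + s->nr_pages &&
                   s->start_pfn < start_pfn + nr_pages;
    }

Note that the zone_for_pfn_range() prototype above takes a plain unsigned start_pfn rather than unsigned long, which truncates very large pfns on 64-bit; later mainline kernels widened the parameter to unsigned long.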
875 unsigned long start_pfn, unsigned long nr_pages) in move_pfn_range() argument
879 zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); in move_pfn_range()
880 move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL); in move_pfn_range()
906 arg.start_pfn = pfn; in online_pages()
985 unsigned long start_pfn = PFN_DOWN(start); in hotadd_new_pgdat() local
1008 pgdat->node_start_pfn = start_pfn; in hotadd_new_pgdat()
1096 u64 start_pfn = PFN_DOWN(start); in check_hotplug_memory_range() local
1099 if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) || in check_hotplug_memory_range()
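check_hotplug_memory_range() demands a non-empty range whose start and size are both aligned to the memory block size. Since block_nr_pages is a power of two, the alignment check reduces to a mask test, which is exactly what the kernel's IS_ALIGNED() computes for power-of-two alignments:

    /*
     * Model of the sanity check in check_hotplug_memory_range().
     * block_nr_pages (the memory block size in pages) is assumed to
     * be a power of two.
     */
    #define ALIGNED_POW2(x, a)      (((x) & ((a) - 1)) == 0)

    static int hotplug_range_ok(unsigned long start_pfn, unsigned long nr_pages,
                                unsigned long block_nr_pages)
    {
            return nr_pages &&
                   ALIGNED_POW2(start_pfn, block_nr_pages) &&
                   ALIGNED_POW2(nr_pages, block_nr_pages);
    }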
1256 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) in is_mem_section_removable() argument
1258 struct page *page = pfn_to_page(start_pfn); in is_mem_section_removable()
1276 int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, in test_pages_in_a_zone() argument
1284 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); in test_pages_in_a_zone()
1367 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) in do_migrate_range() argument
1376 for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) { in do_migrate_range()
1456 offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) in offline_isolated_pages() argument
1458 walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, in offline_isolated_pages()
1466 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, in check_pages_isolated_cb() argument
1471 ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true); in check_pages_isolated_cb()
1479 check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) in check_pages_isolated() argument
1484 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, in check_pages_isolated()
1593 static int __ref __offline_pages(unsigned long start_pfn, in __offline_pages() argument
1605 if (!IS_ALIGNED(start_pfn, pageblock_nr_pages)) in __offline_pages()
1611 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end)) in __offline_pages()
1616 nr_pages = end_pfn - start_pfn; in __offline_pages()
1619 ret = start_isolate_page_range(start_pfn, end_pfn, in __offline_pages()
1624 arg.start_pfn = start_pfn; in __offline_pages()
1633 pfn = start_pfn; in __offline_pages()
1644 pfn = scan_movable_pages(start_pfn, end_pfn); in __offline_pages()
1654 ret = dissolve_free_huge_pages(start_pfn, end_pfn); in __offline_pages()
1658 offlined_pages = check_pages_isolated(start_pfn, end_pfn); in __offline_pages()
1664 offline_isolated_pages(start_pfn, end_pfn); in __offline_pages()
1666 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); in __offline_pages()
1668 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); in __offline_pages()
1697 (unsigned long long) start_pfn << PAGE_SHIFT, in __offline_pages()
1701 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); in __offline_pages()
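Taken together, the __offline_pages() lines trace the whole offlining pipeline: check pageblock alignment and that the range sits in a single zone, isolate the range from the allocator, migrate movable pages out, dissolve free huge pages, verify every page is isolated, free the isolated pages and adjust the managed-page count, and undo the isolation on any failure. A compilable control-flow skeleton, with trivial stubs standing in for the kernel helpers named in the comments:

    /*
     * Control-flow skeleton of __offline_pages(), reconstructed from
     * the lines above.  Only the ordering and the isolate/undo
     * pairing are the point.
     */
    static int isolate_range(unsigned long s, unsigned long e)   /* start_isolate_page_range() */
    { (void)s; (void)e; return 0; }
    static void migrate_range(unsigned long s, unsigned long e)  /* do_migrate_range() loop */
    { (void)s; (void)e; }
    static void dissolve_huge(unsigned long s, unsigned long e)  /* dissolve_free_huge_pages() */
    { (void)s; (void)e; }
    static long count_isolated(unsigned long s, unsigned long e) /* check_pages_isolated() */
    { return (long)(e - s); }
    static void free_isolated(unsigned long s, unsigned long e)  /* offline_isolated_pages() */
    { (void)s; (void)e; }
    static void undo_isolation(unsigned long s, unsigned long e) /* undo_isolate_page_range() */
    { (void)s; (void)e; }

    static int offline_range(unsigned long start_pfn, unsigned long end_pfn)
    {
            long offlined;
            int ret;

            /* 1. (elided) pageblock alignment + single-zone checks */

            /* 2. fence the allocator off the range */
            ret = isolate_range(start_pfn, end_pfn);
            if (ret)
                    return ret;

            /* 3. empty the range: migrate movable pages away and
             *    dissolve any free huge pages in it */
            migrate_range(start_pfn, end_pfn);
            dissolve_huge(start_pfn, end_pfn);

            /* 4. every page must now be isolated; otherwise roll back */
            offlined = count_isolated(start_pfn, end_pfn);
            if (offlined < 0) {
                    undo_isolation(start_pfn, end_pfn);
                    return (int)offlined;
            }

            /* 5. release the pages and fix the managed-page count */
            free_isolated(start_pfn, end_pfn);
            return 0;
    }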
1706 int offline_pages(unsigned long start_pfn, unsigned long nr_pages) in offline_pages() argument
1708 return __offline_pages(start_pfn, start_pfn + nr_pages); in offline_pages()
1724 int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, in walk_memory_range() argument
1732 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in walk_memory_range()
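walk_memory_range() advances one section per iteration, skips sections that are not present, and hands each block to a caller-supplied callback; the loop in try_offline_node() below uses the same stride to ask whether any section of the node is still present. A generic form of that walk (the kernel version also coalesces contiguous sections into whole memory blocks before calling back, omitted here):

    /*
     * Generic section-stride walk shared by walk_memory_range() and
     * try_offline_node().  present() and func() are caller supplied.
     */
    static int walk_sections(unsigned long start_pfn, unsigned long end_pfn,
                             int (*present)(unsigned long pfn),
                             int (*func)(unsigned long pfn, void *arg),
                             void *arg)
    {
            unsigned long pfn;
            int ret;

            for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                    if (!present(pfn))
                            continue;
                    ret = func(pfn, arg);
                    if (ret)
                            return ret;     /* callbacks can abort the walk */
            }
            return 0;
    }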
1834 unsigned long start_pfn = pgdat->node_start_pfn; in try_offline_node() local
1835 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; in try_offline_node()
1838 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in try_offline_node()