Lines Matching full:zone
82 * specifying a zone (MMOP_ONLINE)
84 * "contig-zones": keep zone contiguous
302 * call this function after deciding the zone to which to
348 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
359 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
369 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
384 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
393 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
397 int nid = zone_to_nid(zone); in shrink_zone_span()
399 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
401 * If the section is the smallest section in the zone, it needs to in shrink_zone_span()
402 * shrink zone->zone_start_pfn and zone->spanned_pages. in shrink_zone_span()
404 * for shrinking zone. in shrink_zone_span()
406 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
407 zone_end_pfn(zone)); in shrink_zone_span()
409 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
410 zone->zone_start_pfn = pfn; in shrink_zone_span()
412 zone->zone_start_pfn = 0; in shrink_zone_span()
413 zone->spanned_pages = 0; in shrink_zone_span()
415 } else if (zone_end_pfn(zone) == end_pfn) { in shrink_zone_span()
417 * If the section is the biggest section in the zone, it needs to in shrink_zone_span()
418 * shrink zone->spanned_pages. in shrink_zone_span()
420 * shrinking zone. in shrink_zone_span()
422 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn, in shrink_zone_span()
425 zone->spanned_pages = pfn - zone->zone_start_pfn + 1; in shrink_zone_span()
427 zone->zone_start_pfn = 0; in shrink_zone_span()
428 zone->spanned_pages = 0; in shrink_zone_span()
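Taken together, the shrink_zone_span() fragments above implement a simple rule: if the removed pfn range starts at the zone's first pfn, advance zone_start_pfn to the next pfn that still belongs to the zone; if it ends at the zone's last pfn, pull spanned_pages back to the previous in-zone pfn; if no in-zone pfn remains, the zone becomes empty. Below is a minimal userspace sketch of that rule, on a simplified struct zone (only zone_start_pfn and spanned_pages) and with a still_in_zone() callback standing in for the find_smallest/biggest_section_pfn() walks; it illustrates the logic, it is not the kernel code.

#include <stdbool.h>

/* Simplified stand-in for the kernel's struct zone (hypothetical). */
struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

static unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* Shrink the zone span after removing [start_pfn, end_pfn). */
static void shrink_zone_span(struct zone *zone,
			     unsigned long start_pfn, unsigned long end_pfn,
			     bool (*still_in_zone)(unsigned long pfn))
{
	unsigned long pfn;

	if (zone->zone_start_pfn == start_pfn) {
		/* Removed range starts the zone: find the next valid pfn. */
		bool found = false;

		for (pfn = end_pfn; pfn < zone_end_pfn(zone); pfn++) {
			if (still_in_zone(pfn)) {
				found = true;
				break;
			}
		}
		if (found) {
			/* compute the new length before moving the start */
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			/* nothing left in the zone */
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/* Removed range ends the zone: find the previous valid pfn. */
		bool found = false;

		for (pfn = start_pfn; pfn-- > zone->zone_start_pfn; ) {
			if (still_in_zone(pfn)) {
				found = true;
				break;
			}
		}
		if (found) {
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		} else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
}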
436 struct zone *zone; in update_pgdat_span() local
438 for (zone = pgdat->node_zones; in update_pgdat_span()
439 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
440 unsigned long end_pfn = zone_end_pfn(zone); in update_pgdat_span()
443 if (!zone->spanned_pages) in update_pgdat_span()
446 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
453 if (zone->zone_start_pfn < node_start_pfn) in update_pgdat_span()
454 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
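update_pgdat_span() then derives the node span from whatever zones remain populated: the node start is the minimum zone_start_pfn and the node end is the maximum zone_end_pfn over all non-empty zones. A compact sketch of that recomputation over a plain array of zones, reusing the simplified struct zone and zone_end_pfn() from the sketch above:

/* Recompute the node span from an array of (simplified) zones. */
static void update_node_span(const struct zone *zones, int nr_zones,
			     unsigned long *node_start_pfn,
			     unsigned long *node_spanned_pages)
{
	unsigned long start = 0, end = 0;
	int i;

	for (i = 0; i < nr_zones; i++) {
		const struct zone *zone = &zones[i];

		if (!zone->spanned_pages)
			continue;	/* empty zones do not contribute */

		if (!end) {
			/* first populated zone seeds the span */
			start = zone->zone_start_pfn;
			end = zone_end_pfn(zone);
			continue;
		}
		if (zone->zone_start_pfn < start)
			start = zone->zone_start_pfn;
		if (zone_end_pfn(zone) > end)
			end = zone_end_pfn(zone);
	}

	*node_start_pfn = start;
	*node_spanned_pages = end - start;
}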
461 void __ref remove_pfn_range_from_zone(struct zone *zone, in remove_pfn_range_from_zone() argument
466 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone()
481 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So in remove_pfn_range_from_zone()
485 if (zone_is_zone_device(zone)) in remove_pfn_range_from_zone()
488 clear_zone_contiguous(zone); in remove_pfn_range_from_zone()
490 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in remove_pfn_range_from_zone()
493 set_zone_contiguous(zone); in remove_pfn_range_from_zone()
605 * zone ("present"). in online_pages_range()
623 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_online() argument
625 int nid = zone_to_nid(zone); in node_states_check_changes_online()
633 if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY)) in node_states_check_changes_online()
636 if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY)) in node_states_check_changes_online()
653 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, in resize_zone_range() argument
656 unsigned long old_end_pfn = zone_end_pfn(zone); in resize_zone_range()
658 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
659 zone->zone_start_pfn = start_pfn; in resize_zone_range()
661 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
684 * Associate the pfn range with the given zone, initializing the memmaps
685 * and resizing the pgdat/zone data to span the added pages. After this
690 * zone stats (e.g., nr_isolate_pageblock) are touched.
692 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, in move_pfn_range_to_zone() argument
696 struct pglist_data *pgdat = zone->zone_pgdat; in move_pfn_range_to_zone()
699 clear_zone_contiguous(zone); in move_pfn_range_to_zone()
701 if (zone_is_empty(zone)) in move_pfn_range_to_zone()
702 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
703 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
712 if (zone_is_zone_device(zone)) { in move_pfn_range_to_zone()
721 * with their zone properly. Not nice but set_pfnblock_flags_mask in move_pfn_range_to_zone()
722 * expects the zone to span the pfn range. All the pages in the range in move_pfn_range_to_zone()
725 memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0, in move_pfn_range_to_zone()
728 set_zone_contiguous(zone); in move_pfn_range_to_zone()
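On the onlining side, resize_zone_range() and move_pfn_range_to_zone() do the inverse: grow the zone and its node so they span the newly added [start_pfn, start_pfn + nr_pages) range, then initialize the memmap for those pages. A rough userspace sketch of just the resizing arithmetic, reusing the simplified struct zone and zone_end_pfn() from the earlier sketch and assuming an empty zone has zone_start_pfn == spanned_pages == 0 (memmap_init_range() and the contiguity handling are left out):

#include <stdbool.h>

static bool zone_is_empty(const struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/* Grow the zone so it covers [start_pfn, start_pfn + nr_pages). */
static void resize_zone_range(struct zone *zone,
			      unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);
	unsigned long new_end_pfn = start_pfn + nr_pages;

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	/* span from the (possibly moved) start to the larger of the two ends */
	zone->spanned_pages = (new_end_pfn > old_end_pfn ? new_end_pfn
							 : old_end_pfn) -
			      zone->zone_start_pfn;
}

/* Grow the node span the same way. */
static void resize_node_range(unsigned long *node_start_pfn,
			      unsigned long *node_spanned_pages,
			      unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long old_end_pfn = *node_start_pfn + *node_spanned_pages;
	unsigned long new_end_pfn = start_pfn + nr_pages;

	if (!*node_spanned_pages || start_pfn < *node_start_pfn)
		*node_start_pfn = start_pfn;

	*node_spanned_pages = (new_end_pfn > old_end_pfn ? new_end_pfn
							  : old_end_pfn) -
			      *node_start_pfn;
}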
737 struct zone *zone) in auto_movable_stats_account_zone() argument
739 if (zone_idx(zone) == ZONE_MOVABLE) { in auto_movable_stats_account_zone()
740 stats->movable_pages += zone->present_pages; in auto_movable_stats_account_zone()
742 stats->kernel_early_pages += zone->present_early_pages; in auto_movable_stats_account_zone()
748 stats->movable_pages += zone->cma_pages; in auto_movable_stats_account_zone()
749 stats->kernel_early_pages -= zone->cma_pages; in auto_movable_stats_account_zone()
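The auto_movable_stats_account_zone() fragments show how the "auto-movable" policy splits present memory into two buckets: ZONE_MOVABLE pages count as movable, everything else counts as kernel memory that was present early during boot, and CMA pages are shifted from the kernel bucket to the movable one because they can only hold movable allocations anyway. A simplified accounting sketch, with zone_idx()/present_early_pages/cma_pages modeled as plain struct members (hypothetical layout, not the kernel's):

struct auto_movable_stats {
	unsigned long kernel_early_pages;
	unsigned long movable_pages;
};

/* Simplified per-zone data for the accounting below (hypothetical). */
struct zone_stats {
	int is_movable;			/* zone_idx(zone) == ZONE_MOVABLE */
	unsigned long present_pages;
	unsigned long present_early_pages;
	unsigned long cma_pages;
};

static void account_zone(struct auto_movable_stats *stats,
			 const struct zone_stats *zone)
{
	if (zone->is_movable) {
		stats->movable_pages += zone->present_pages;
	} else {
		stats->kernel_early_pages += zone->present_early_pages;

		/*
		 * CMA pages sit in a kernel zone but only host movable
		 * allocations, so account them as movable instead.
		 */
		stats->movable_pages += zone->cma_pages;
		stats->kernel_early_pages -= zone->cma_pages;
	}
}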
774 * satisfy the configured zone ratio. in auto_movable_stats_account_group()
792 struct zone *zone; in auto_movable_can_online_movable() local
798 for_each_populated_zone(zone) in auto_movable_can_online_movable()
799 auto_movable_stats_account_zone(&stats, zone); in auto_movable_can_online_movable()
802 zone = pgdat->node_zones + i; in auto_movable_can_online_movable()
803 if (populated_zone(zone)) in auto_movable_can_online_movable()
804 auto_movable_stats_account_zone(&stats, zone); in auto_movable_can_online_movable()
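auto_movable_can_online_movable() then sums those stats over the populated zones (system-wide or per node, depending on auto_movable_numa_aware) and allows onlining to ZONE_MOVABLE only while the movable:kernel ratio stays within the configured limit. A sketch of that decision, reusing struct auto_movable_stats from the previous sketch and assuming a percentage-style ratio like the kernel's auto_movable_ratio parameter:

#include <stdbool.h>

/*
 * Would onlining nr_pages more to ZONE_MOVABLE still satisfy
 * movable_pages <= kernel_early_pages * ratio / 100 ?
 * (stats is passed by value, so the caller's totals are untouched.)
 */
static bool can_online_movable(struct auto_movable_stats stats,
			       unsigned long nr_pages, unsigned int ratio)
{
	stats.movable_pages += nr_pages;

	if (!stats.kernel_early_pages)
		return false;	/* no kernel memory to balance against */

	return stats.movable_pages <=
	       (unsigned long long)stats.kernel_early_pages * ratio / 100;
}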
835 * Returns a default kernel memory zone for the given pfn range.
836 * If no kernel zone covers this pfn range it will automatically go
839 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, in default_kernel_zone_for_pfn()
846 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn() local
848 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
849 return zone; in default_kernel_zone_for_pfn()
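default_kernel_zone_for_pfn() picks the first kernel (non-MOVABLE) zone whose current span intersects the pfn range and falls back to ZONE_NORMAL when none does. A small sketch of that selection over an array of the simplified zones from the earlier sketches, where the last array slot plays the role of ZONE_NORMAL:

#include <stdbool.h>

static bool zone_intersects(const struct zone *zone,
			    unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;
	return true;
}

/*
 * Pick the first kernel zone whose span intersects the range; fall back
 * to "normal" (the last kernel zone in the array) otherwise.
 */
static struct zone *default_kernel_zone(struct zone *kernel_zones,
					int nr_kernel_zones,
					unsigned long start_pfn,
					unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_kernel_zones - 1; i++)
		if (zone_intersects(&kernel_zones[i], start_pfn, nr_pages))
			return &kernel_zones[i];

	return &kernel_zones[nr_kernel_zones - 1];
}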
856 * Determine to which zone to online memory dynamically based on user
865 * We don't allow for hotplugged memory in a KERNEL zone to increase the
876 * hotunplugging, as implemented in hypervisors, could result in zone
905 static struct zone *auto_movable_zone_for_pfn(int nid, in auto_movable_zone_for_pfn()
930 * to the same zone, because dynamic memory groups only deal in auto_movable_zone_for_pfn()
966 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, in default_zone_for_pfn()
969 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, in default_zone_for_pfn()
971 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
976 * We inherit the existing zone in a simple case where zones do not in default_zone_for_pfn()
983 * If the range doesn't belong to any zone or two zones overlap in the in default_zone_for_pfn()
984 * given range then we use movable zone only if movable_node is in default_zone_for_pfn()
985 * enabled because we always online to a kernel zone by default. in default_zone_for_pfn()
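default_zone_for_pfn() encodes the fallback policy spelled out in the comment fragments above: if the range overlaps exactly one of the kernel zone and ZONE_MOVABLE, inherit that zone; if it overlaps neither or both, use ZONE_MOVABLE only when movable_node is enabled, otherwise stay with the kernel zone. A condensed sketch of that decision, reusing zone_intersects() from the previous sketch (kernel_zone stands for the default kernel zone chosen above):

#include <stdbool.h>

static struct zone *default_zone_for_range(struct zone *kernel_zone,
					   struct zone *movable_zone,
					   unsigned long start_pfn,
					   unsigned long nr_pages,
					   bool movable_node_enabled)
{
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/* Simple case: the range overlaps exactly one of the two zones. */
	if (in_kernel != in_movable)
		return in_kernel ? kernel_zone : movable_zone;

	/*
	 * No zone covers the range, or both do: online to ZONE_MOVABLE
	 * only if movable_node is enabled; a kernel zone is the default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}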
990 struct zone *zone_for_pfn_range(int online_type, int nid, in zone_for_pfn_range()
1013 struct zone *zone = page_zone(page); in adjust_present_page_count() local
1014 const bool movable = zone_idx(zone) == ZONE_MOVABLE; in adjust_present_page_count()
1021 zone->present_early_pages += nr_pages; in adjust_present_page_count()
1022 zone->present_pages += nr_pages; in adjust_present_page_count()
1023 zone->zone_pgdat->node_present_pages += nr_pages; in adjust_present_page_count()
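adjust_present_page_count() keeps the present-page counters consistent when memory is onlined (positive nr_pages) or offlined (negative): the zone's present_pages, its node's node_present_pages, and, for early boot-time memory in a kernel zone, present_early_pages all move together. A minimal bookkeeping sketch with signed page counts and a hypothetical simplified counter layout:

/* Simplified node/zone counters (hypothetical layout). */
struct node_counters {
	long node_present_pages;
};

struct zone_counters {
	int is_movable;			/* zone_idx(zone) == ZONE_MOVABLE */
	long present_pages;
	long present_early_pages;
	struct node_counters *node;
};

static void adjust_present_page_count(struct zone_counters *zone,
				      int memory_block_is_early,
				      long nr_pages)
{
	/*
	 * Early (boot-time) memory in a kernel zone also counts towards
	 * present_early_pages, which the auto-movable policy reads.
	 */
	if (memory_block_is_early && !zone->is_movable)
		zone->present_early_pages += nr_pages;

	zone->present_pages += nr_pages;
	zone->node->node_present_pages += nr_pages;
}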
1032 struct zone *zone) in mhp_init_memmap_on_memory() argument
1041 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE); in mhp_init_memmap_on_memory()
1075 struct zone *zone, struct memory_group *group) in online_pages() argument
1079 const int nid = zone_to_nid(zone); in online_pages()
1097 /* associate pfn range with the zone */ in online_pages()
1098 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE); in online_pages()
1102 node_states_check_changes_online(nr_pages, zone, &arg); in online_pages()
1113 spin_lock_irqsave(&zone->lock, flags); in online_pages()
1114 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages; in online_pages()
1115 spin_unlock_irqrestore(&zone->lock, flags); in online_pages()
1118 * If this zone is not populated, then it is not in zonelist. in online_pages()
1119 * This means the page allocator ignores this zone. in online_pages()
1122 if (!populated_zone(zone)) { in online_pages()
1124 setup_zone_pageset(zone); in online_pages()
1140 * zone to make sure the just onlined pages are properly distributed in online_pages()
1143 shuffle_zone(zone); in online_pages()
1162 remove_pfn_range_from_zone(zone, pfn, nr_pages); in online_pages()
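Read top to bottom, the online_pages() fragments describe a fixed sequence: attach the pfn range to the chosen zone with its pageblocks isolated, record the extra isolated pageblocks, build the zone's per-cpu pagesets if it just became populated, then (in code not matched here) undo the isolation so the pages reach the buddy allocator, and finally shuffle the grown zone; on failure the range is torn back out of the zone. A do-nothing skeleton that only names those steps in order, with hypothetical stubs standing in for the kernel calls:

#include <stdio.h>

/* Hypothetical stubs; the names loosely follow the fragments above. */
static void attach_range_isolated(void)   { puts("move pfn range to zone (MIGRATE_ISOLATE)"); }
static void account_isolated_blocks(void) { puts("zone->nr_isolate_pageblock += blocks"); }
static void setup_pagesets_if_new(void)   { puts("build per-cpu pagesets for a newly populated zone"); }
static void undo_isolation(void)          { puts("undo isolation: pages go to the buddy allocator"); }
static void shuffle_grown_zone(void)      { puts("shuffle freelists of the grown zone"); }

/* Order of operations in the onlining path, per the fragments above. */
static void online_pages_sketch(void)
{
	attach_range_isolated();
	account_isolated_blocks();
	setup_pagesets_if_new();
	undo_isolation();
	shuffle_grown_zone();
}

int main(void)
{
	online_pages_sketch();
	return 0;
}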
1170 struct zone *z; in reset_node_present_pages()
1218 * The node we allocated has no zone fallback lists. For avoiding in hotadd_new_pgdat()
1600 * Confirm all pages in a range [start, end) belong to the same zone (skipping
1601 * memory holes). When true, return the zone.
1603 struct zone *test_pages_in_a_zone(unsigned long start_pfn, in test_pages_in_a_zone()
1607 struct zone *zone = NULL; in test_pages_in_a_zone() local
1618 /* Check if we got outside of the zone */ in test_pages_in_a_zone()
1619 if (zone && !zone_spans_pfn(zone, pfn)) in test_pages_in_a_zone()
1622 if (zone && page_zone(page) != zone) in test_pages_in_a_zone()
1624 zone = page_zone(page); in test_pages_in_a_zone()
1628 return zone; in test_pages_in_a_zone()
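test_pages_in_a_zone() is the guard used before offlining: walk every pfn in [start_pfn, end_pfn), skip holes, and bail out if a page falls outside the zone seen so far or belongs to a different zone; only a range that resolves to exactly one zone may be offlined. A userspace analogue over an array of per-pfn zone ids, with a negative id marking a memory hole:

/*
 * Return the common zone id of all present pfns in [start, end),
 * or -1 if the range spans more than one zone.
 * zone_of[pfn] < 0 marks a memory hole and is skipped.
 */
static int test_pfns_in_a_zone(const int *zone_of,
			       unsigned long start, unsigned long end)
{
	int zone = -1;
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		if (zone_of[pfn] < 0)
			continue;		/* hole: nothing to check */
		if (zone >= 0 && zone_of[pfn] != zone)
			return -1;		/* second zone found */
		zone = zone_of[pfn];
	}

	return zone;	/* may still be -1 if the range was all holes */
}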
1759 * We have checked that the migration range is in a single zone, so in do_migrate_range()
1798 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_offline() argument
1800 struct pglist_data *pgdat = zone->zone_pgdat; in node_states_check_changes_offline()
1818 if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) in node_states_check_changes_offline()
1819 arg->status_change_nid_normal = zone_to_nid(zone); in node_states_check_changes_offline()
1826 * If the zone is within the range of [0..ZONE_HIGHMEM), and in node_states_check_changes_offline()
1831 if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages) in node_states_check_changes_offline()
1832 arg->status_change_nid_high = zone_to_nid(zone); in node_states_check_changes_offline()
1848 arg->status_change_nid = zone_to_nid(zone); in node_states_check_changes_offline()
1878 struct zone *zone; in offline_pages() local
1915 zone = test_pages_in_a_zone(start_pfn, end_pfn); in offline_pages()
1916 if (!zone) { in offline_pages()
1921 node = zone_to_nid(zone); in offline_pages()
1927 zone_pcp_disable(zone); in offline_pages()
1941 node_states_check_changes_offline(nr_pages, zone, &arg); in offline_pages()
2000 spin_lock_irqsave(&zone->lock, flags); in offline_pages()
2001 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; in offline_pages()
2002 spin_unlock_irqrestore(&zone->lock, flags); in offline_pages()
2005 zone_pcp_enable(zone); in offline_pages()
2014 if (!populated_zone(zone)) { in offline_pages()
2015 zone_pcp_reset(zone); in offline_pages()
2028 remove_pfn_range_from_zone(zone, start_pfn, nr_pages); in offline_pages()
2037 zone_pcp_enable(zone); in offline_pages()
2264 * Sense the online_type via the zone of the memory block. Offlining in try_offline_memory_block()