Lines Matching full:zone

306  * call this function after deciding the zone to which to
354 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
365 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
375 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
390 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
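find_smallest_section_pfn() walks upward from start_pfn and find_biggest_section_pfn() walks downward from end_pfn, both in section-sized steps, looking for a pfn that is still backed and still belongs to the zone. A minimal userspace sketch of the upward scan, assuming a toy PAGES_PER_SECTION and a section_in_zone[] bitmap in place of the kernel's pfn_valid()/online-section/page_zone() checks (both stand-ins are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGES_PER_SECTION 8UL    /* toy value; the real one is arch-defined */

    /* toy stand-in for pfn_valid()/page_zone() on each section */
    static bool section_in_zone[8] = { false, false, true, true, true, false };

    static bool pfn_in_zone(unsigned long pfn)
    {
        return section_in_zone[pfn / PAGES_PER_SECTION];
    }

    static unsigned long smallest_section_pfn(unsigned long start, unsigned long end)
    {
        unsigned long pfn;

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
            if (pfn_in_zone(pfn))
                return pfn;    /* new zone_start_pfn candidate */
        return 0;              /* no section of this zone left */
    }

    int main(void)
    {
        /* sections 0 and 1 were just removed; the scan lands on section 2 */
        printf("%lu\n", smallest_section_pfn(0, 6 * PAGES_PER_SECTION)); /* 16 */
        return 0;
    }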
399 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
403 int nid = zone_to_nid(zone); in shrink_zone_span()
405 zone_span_writelock(zone); in shrink_zone_span()
406 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
408  * If the section is the smallest section in the zone, we need to in shrink_zone_span()
409  * shrink zone->zone_start_pfn and zone->spanned_pages. in shrink_zone_span()
411  * for shrinking the zone. in shrink_zone_span()
413 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
414 zone_end_pfn(zone)); in shrink_zone_span()
416 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
417 zone->zone_start_pfn = pfn; in shrink_zone_span()
419 zone->zone_start_pfn = 0; in shrink_zone_span()
420 zone->spanned_pages = 0; in shrink_zone_span()
422 } else if (zone_end_pfn(zone) == end_pfn) { in shrink_zone_span()
424  * If the section is the biggest section in the zone, we need to in shrink_zone_span()
425  * shrink zone->spanned_pages. in shrink_zone_span()
427  * shrinking the zone. in shrink_zone_span()
429 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn, in shrink_zone_span()
432 zone->spanned_pages = pfn - zone->zone_start_pfn + 1; in shrink_zone_span()
434 zone->zone_start_pfn = 0; in shrink_zone_span()
435 zone->spanned_pages = 0; in shrink_zone_span()
438 zone_span_writeunlock(zone); in shrink_zone_span()
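shrink_zone_span() only has work to do when the removed range touches one end of the zone span; interior holes leave the span untouched. A simplified model of that bookkeeping, assuming a stand-in struct zone and a caller-supplied survivor pfn in place of the section scans above:

    #include <stdio.h>

    struct zone { unsigned long zone_start_pfn, spanned_pages; };

    static unsigned long zone_end_pfn(const struct zone *z)
    {
        return z->zone_start_pfn + z->spanned_pages;
    }

    /* survivor is the pfn the section scan found; 0 means "nothing left" */
    static void shrink_span(struct zone *z, unsigned long start_pfn,
                            unsigned long end_pfn, unsigned long survivor)
    {
        if (z->zone_start_pfn == start_pfn) {
            /* removed the low end: raise the start to the first survivor */
            if (survivor) {
                z->spanned_pages = zone_end_pfn(z) - survivor;
                z->zone_start_pfn = survivor;
            } else {
                z->zone_start_pfn = 0;
                z->spanned_pages = 0;
            }
        } else if (zone_end_pfn(z) == end_pfn) {
            /* removed the high end: survivor is the last pfn kept, hence +1 */
            if (survivor) {
                z->spanned_pages = survivor - z->zone_start_pfn + 1;
            } else {
                z->zone_start_pfn = 0;
                z->spanned_pages = 0;
            }
        }
    }

    int main(void)
    {
        struct zone z = { 0, 64 };

        shrink_span(&z, 0, 16, 16);    /* remove [0, 16): start moves up */
        printf("start=%lu spanned=%lu\n", z.zone_start_pfn, z.spanned_pages);
        return 0;
    }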
444 struct zone *zone; in update_pgdat_span() local
446 for (zone = pgdat->node_zones; in update_pgdat_span()
447 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
448 unsigned long zone_end_pfn = zone->zone_start_pfn + in update_pgdat_span()
449 zone->spanned_pages; in update_pgdat_span()
452 if (!zone->spanned_pages) in update_pgdat_span()
455 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
462 if (zone->zone_start_pfn < node_start_pfn) in update_pgdat_span()
463 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
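update_pgdat_span() then rebuilds the node span as the union of the surviving zone spans, skipping empty zones. A compact userspace model, with simplified stand-ins for struct zone and the pgdat:

    #include <stdio.h>

    #define MAX_NR_ZONES 4

    struct zone { unsigned long zone_start_pfn, spanned_pages; };
    struct pgdat {
        struct zone node_zones[MAX_NR_ZONES];
        unsigned long node_start_pfn, node_spanned_pages;
    };

    static void update_pgdat_span(struct pgdat *p)
    {
        unsigned long node_start_pfn = 0, node_end_pfn = 0;
        struct zone *zone;

        for (zone = p->node_zones; zone < p->node_zones + MAX_NR_ZONES; zone++) {
            unsigned long end = zone->zone_start_pfn + zone->spanned_pages;

            if (!zone->spanned_pages)    /* empty zones contribute nothing */
                continue;
            if (!node_end_pfn) {         /* first populated zone seeds both */
                node_start_pfn = zone->zone_start_pfn;
                node_end_pfn = end;
                continue;
            }
            if (end > node_end_pfn)
                node_end_pfn = end;
            if (zone->zone_start_pfn < node_start_pfn)
                node_start_pfn = zone->zone_start_pfn;
        }
        p->node_start_pfn = node_start_pfn;
        p->node_spanned_pages = node_end_pfn - node_start_pfn;
    }

    int main(void)
    {
        struct pgdat p = { .node_zones = { { 0, 0 }, { 32, 32 }, { 128, 64 }, { 0, 0 } } };

        update_pgdat_span(&p);
        printf("start=%lu spanned=%lu\n", p.node_start_pfn, p.node_spanned_pages); /* 32 160 */
        return 0;
    }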
470 void __ref remove_pfn_range_from_zone(struct zone *zone, in remove_pfn_range_from_zone() argument
475 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone()
491 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So in remove_pfn_range_from_zone()
495 if (zone_idx(zone) == ZONE_DEVICE) in remove_pfn_range_from_zone()
499 clear_zone_contiguous(zone); in remove_pfn_range_from_zone()
501 pgdat_resize_lock(zone->zone_pgdat, &flags); in remove_pfn_range_from_zone()
502 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in remove_pfn_range_from_zone()
504 pgdat_resize_unlock(zone->zone_pgdat, &flags); in remove_pfn_range_from_zone()
506 set_zone_contiguous(zone); in remove_pfn_range_from_zone()
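remove_pfn_range_from_zone() bails out for ZONE_DEVICE and otherwise brackets the span shrinking with contiguity invalidation and the pgdat resize lock. A stub sketch of that ordering (printf stand-ins only, not the kernel implementations):

    #include <stdio.h>

    static void clear_zone_contiguous(void)  { puts("contiguous = false"); }
    static void pgdat_resize_lock(void)      { puts("lock pgdat"); }
    static void shrink_zone_span(void)       { puts("shrink zone span"); }
    static void update_pgdat_span(void)      { puts("shrink node span"); }
    static void pgdat_resize_unlock(void)    { puts("unlock pgdat"); }
    static void set_zone_contiguous(void)    { puts("recheck contiguous"); }

    static void remove_pfn_range_from_zone(int is_zone_device)
    {
        if (is_zone_device)    /* span shrinking cannot handle ZONE_DEVICE */
            return;

        clear_zone_contiguous();
        pgdat_resize_lock();
        shrink_zone_span();
        update_pgdat_span();
        pgdat_resize_unlock();
        set_zone_contiguous();
    }

    int main(void) { remove_pfn_range_from_zone(0); return 0; }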
619 * zone ("present"). in online_pages_range()
630 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_online() argument
632 int nid = zone_to_nid(zone); in node_states_check_changes_online()
640 if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY)) in node_states_check_changes_online()
643 if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY)) in node_states_check_changes_online()
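node_states_check_changes_online() flags which node state bits the onlining may newly set: a zone at or below ZONE_NORMAL can grant N_NORMAL_MEMORY, one at or below ZONE_HIGHMEM can grant N_HIGH_MEMORY. A toy model with hypothetical per-node booleans in place of node_state():

    #include <stdbool.h>
    #include <stdio.h>

    enum zone_idx { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE };

    static bool has_normal, has_high;    /* toy per-node state bits */

    static void check_changes_online(enum zone_idx zid,
                                     int *becomes_normal, int *becomes_high)
    {
        *becomes_normal = (zid <= ZONE_NORMAL && !has_normal);
        *becomes_high = (zid <= ZONE_HIGHMEM && !has_high);
    }

    int main(void)
    {
        int n, h;

        check_changes_online(ZONE_NORMAL, &n, &h);
        printf("gains N_NORMAL_MEMORY=%d, N_HIGH_MEMORY=%d\n", n, h);
        return 0;
    }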
660 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, in resize_zone_range() argument
663 unsigned long old_end_pfn = zone_end_pfn(zone); in resize_zone_range()
665 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
666 zone->zone_start_pfn = start_pfn; in resize_zone_range()
668 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
683 * Associate the pfn range with the given zone, initializing the memmaps
684 * and resizing the pgdat/zone data to span the added pages. After this
689 * zone stats (e.g., nr_isolate_pageblock) are touched.
691 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, in move_pfn_range_to_zone() argument
695 struct pglist_data *pgdat = zone->zone_pgdat; in move_pfn_range_to_zone()
699 clear_zone_contiguous(zone); in move_pfn_range_to_zone()
701  /* TODO: the pgdat lock is taken irqsave while the zone span lock is not; it has been this way historically */ in move_pfn_range_to_zone()
703 zone_span_writelock(zone); in move_pfn_range_to_zone()
704 if (zone_is_empty(zone)) in move_pfn_range_to_zone()
705 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
706 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
707 zone_span_writeunlock(zone); in move_pfn_range_to_zone()
713 * with their zone properly. Not nice but set_pfnblock_flags_mask in move_pfn_range_to_zone()
714  * expects the zone to span the pfn range. All the pages in the range in move_pfn_range_to_zone()
717 memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, in move_pfn_range_to_zone()
720 set_zone_contiguous(zone); in move_pfn_range_to_zone()
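Together, init_currently_empty_zone() and resize_zone_range() grow the zone span to cover the hot-added range before the memmap is initialized. A sketch with the resize arithmetic inlined, using a stand-in struct zone (the kernel additionally initializes every page's memmap entry here, in MIGRATE_ISOLATE state):

    #include <stdio.h>

    struct zone { unsigned long zone_start_pfn, spanned_pages; };

    static void move_pfn_range_to_zone(struct zone *z, unsigned long start_pfn,
                                       unsigned long nr_pages)
    {
        unsigned long old_end_pfn = z->zone_start_pfn + z->spanned_pages;

        /* an empty zone adopts the new start; otherwise only grow downward */
        if (!z->spanned_pages || start_pfn < z->zone_start_pfn)
            z->zone_start_pfn = start_pfn;

        /* the new end is max(old end, end of the added range) */
        if (start_pfn + nr_pages > old_end_pfn)
            old_end_pfn = start_pfn + nr_pages;
        z->spanned_pages = old_end_pfn - z->zone_start_pfn;

        /* memmap_init_zone() would now stamp every page in the range
         * with this zone, so page_zone() matches the new span */
    }

    int main(void)
    {
        struct zone z = { 0, 0 };    /* empty zone */

        move_pfn_range_to_zone(&z, 4096, 512);
        printf("start=%lu spanned=%lu\n", z.zone_start_pfn, z.spanned_pages);
        return 0;
    }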
724 * Returns a default kernel memory zone for the given pfn range.
725 * If no kernel zone covers this pfn range it will automatically go
728 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, in default_kernel_zone_for_pfn()
735 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn() local
737 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
738 return zone; in default_kernel_zone_for_pfn()
744 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, in default_zone_for_pfn()
747 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, in default_zone_for_pfn()
749 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
754 * We inherit the existing zone in a simple case where zones do not in default_zone_for_pfn()
761 * If the range doesn't belong to any zone or two zones overlap in the in default_zone_for_pfn()
762  * given range then we use the movable zone only if movable_node is in default_zone_for_pfn()
763 * enabled because we always online to a kernel zone by default. in default_zone_for_pfn()
768 struct zone *zone_for_pfn_range(int online_type, int nid, unsigned long start_pfn, in zone_for_pfn_range()
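The selection policy reduces to: inherit the one zone that already intersects the range, and fall back to the movable_node policy when none or both do. A minimal model of default_zone_for_pfn() with hypothetical stand-ins for struct zone and zone_intersects():

    #include <stdbool.h>
    #include <stdio.h>

    struct zone { const char *name; unsigned long start_pfn, end_pfn; };

    static bool zone_intersects(struct zone *z, unsigned long start, unsigned long nr)
    {
        return z->end_pfn > start && z->start_pfn < start + nr;
    }

    static struct zone *default_zone_for_pfn(struct zone *kernel_zone,
                                             struct zone *movable_zone,
                                             bool movable_node,
                                             unsigned long start, unsigned long nr)
    {
        bool in_kernel = zone_intersects(kernel_zone, start, nr);
        bool in_movable = zone_intersects(movable_zone, start, nr);

        /* simple case: exactly one existing zone already covers the range */
        if (in_kernel ^ in_movable)
            return in_kernel ? kernel_zone : movable_zone;

        /* no zone, or both zones, cover it: movable_node picks the default */
        return movable_node ? movable_zone : kernel_zone;
    }

    int main(void)
    {
        struct zone normal = { "Normal", 0, 1024 };
        struct zone movable = { "Movable", 2048, 4096 };

        /* [1024, 1536) touches neither zone; default is the kernel zone */
        printf("%s\n", default_zone_for_pfn(&normal, &movable, false, 1024, 512)->name);
        return 0;
    }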
784 struct zone *zone; in online_pages() local
796 /* associate pfn range with the zone */ in online_pages()
797 zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages); in online_pages()
798 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE); in online_pages()
802 node_states_check_changes_online(nr_pages, zone, &arg); in online_pages()
813 spin_lock_irqsave(&zone->lock, flags); in online_pages()
814 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages; in online_pages()
815 spin_unlock_irqrestore(&zone->lock, flags); in online_pages()
818  * If this zone is not populated, then it is not in the zonelist. in online_pages()
819 * This means the page allocator ignores this zone. in online_pages()
822 if (!populated_zone(zone)) { in online_pages()
824 setup_zone_pageset(zone); in online_pages()
828 zone->present_pages += nr_pages; in online_pages()
830 pgdat_resize_lock(zone->zone_pgdat, &flags); in online_pages()
831 zone->zone_pgdat->node_present_pages += nr_pages; in online_pages()
832 pgdat_resize_unlock(zone->zone_pgdat, &flags); in online_pages()
837 zone_pcp_update(zone); in online_pages()
845  * zone to make sure the newly onlined pages are properly distributed in online_pages()
848 shuffle_zone(zone); in online_pages()
866 remove_pfn_range_from_zone(zone, pfn, nr_pages); in online_pages()
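online_pages() is easiest to read as a fixed sequence with one rollback edge: pages enter the zone isolated, accounting and zonelists are updated, isolation is lifted, and a notifier failure unwinds via remove_pfn_range_from_zone(). A printf walk-through of that sequence (stubs with assumed step names, not kernel calls):

    #include <stdio.h>

    static int notifier(const char *event, int fail)
    {
        printf("notify %s\n", event);
        return fail;
    }

    static void online_pages_flow(void)
    {
        puts("move_pfn_range_to_zone: pages enter the zone MIGRATE_ISOLATE");
        if (notifier("MEM_GOING_ONLINE", 0))
            goto failed_addition;
        puts("zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages");
        puts("newly populated zone: build zonelists, setup_zone_pageset");
        puts("present_pages += nr_pages on zone and node");
        puts("undo isolation: pages reach the free lists");
        puts("shuffle_zone, wake kswapd/kcompactd");
        notifier("MEM_ONLINE", 0);
        return;

    failed_addition:
        puts("rollback: remove_pfn_range_from_zone");
    }

    int main(void) { online_pages_flow(); return 0; }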
874 struct zone *z; in reset_node_present_pages()
922 * The node we allocated has no zone fallback lists. For avoiding in hotadd_new_pgdat()
1186 * Confirm all pages in a range [start, end) belong to the same zone (skipping
1187 * memory holes). When true, return the zone.
1189 struct zone *test_pages_in_a_zone(unsigned long start_pfn, in test_pages_in_a_zone()
1193 struct zone *zone = NULL; in test_pages_in_a_zone() local
1211 /* Check if we got outside of the zone */ in test_pages_in_a_zone()
1212 if (zone && !zone_spans_pfn(zone, pfn + i)) in test_pages_in_a_zone()
1215 if (zone && page_zone(page) != zone) in test_pages_in_a_zone()
1217 zone = page_zone(page); in test_pages_in_a_zone()
1221 return zone; in test_pages_in_a_zone()
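test_pages_in_a_zone() tolerates holes but rejects any range that touches two zones, since offlining operates on exactly one zone. A toy model using a zone_of[] table as a stand-in for pfn_valid()/page_zone():

    #include <stdio.h>

    #define NR_PFNS 16

    static int zone_of[NR_PFNS] = {    /* -1 marks a memory hole */
        1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
    };

    static int test_pages_in_a_zone(unsigned long start, unsigned long end)
    {
        int zone = -1;
        unsigned long pfn;

        for (pfn = start; pfn < end; pfn++) {
            if (zone_of[pfn] < 0)    /* skip holes */
                continue;
            if (zone >= 0 && zone_of[pfn] != zone)
                return -1;           /* crossed a zone boundary */
            zone = zone_of[pfn];
        }
        return zone;
    }

    int main(void)
    {
        printf("[0,12) -> zone %d\n", test_pages_in_a_zone(0, 12));    /* 1 */
        printf("[0,16) -> zone %d\n", test_pages_in_a_zone(0, 16));    /* -1 */
        return 0;
    }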
1341  * We have checked that the migration range is in a single zone, so in do_migrate_range()
1378 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_offline() argument
1380 struct pglist_data *pgdat = zone->zone_pgdat; in node_states_check_changes_offline()
1398 if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) in node_states_check_changes_offline()
1399 arg->status_change_nid_normal = zone_to_nid(zone); in node_states_check_changes_offline()
1406  * If the zone is within the range of [0..ZONE_HIGHMEM], and in node_states_check_changes_offline()
1411 if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages) in node_states_check_changes_offline()
1412 arg->status_change_nid_high = zone_to_nid(zone); in node_states_check_changes_offline()
1428 arg->status_change_nid = zone_to_nid(zone); in node_states_check_changes_offline()
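The offline check is the mirror image of the online one: a state bit is scheduled for clearing when the offlined pages cover all of the node's present pages in the relevant zone class. A toy model with the accumulated present-page counts passed in as hypothetical inputs:

    #include <stdio.h>

    static void check_changes_offline(unsigned long nr_pages,
                                      unsigned long present_upto_normal,
                                      unsigned long present_upto_highmem,
                                      unsigned long present_total)
    {
        if (nr_pages >= present_upto_normal)
            puts("node would lose N_NORMAL_MEMORY");
        if (nr_pages >= present_upto_highmem)
            puts("node would lose N_HIGH_MEMORY");
        if (nr_pages >= present_total)
            puts("node would lose N_MEMORY");
    }

    int main(void)
    {
        /* offlining 512 pages of a node with 512 present pages total */
        check_changes_offline(512, 512, 512, 512);
        return 0;
    }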
1457 struct zone *zone; in offline_pages() local
1487 zone = test_pages_in_a_zone(start_pfn, end_pfn); in offline_pages()
1488 if (!zone) { in offline_pages()
1493 node = zone_to_nid(zone); in offline_pages()
1506 node_states_check_changes_offline(nr_pages, zone, &arg); in offline_pages()
1567 drain_all_pages(zone); in offline_pages()
1579 spin_lock_irqsave(&zone->lock, flags); in offline_pages()
1580 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; in offline_pages()
1581 spin_unlock_irqrestore(&zone->lock, flags); in offline_pages()
1585 zone->present_pages -= nr_pages; in offline_pages()
1587 pgdat_resize_lock(zone->zone_pgdat, &flags); in offline_pages()
1588 zone->zone_pgdat->node_present_pages -= nr_pages; in offline_pages()
1589 pgdat_resize_unlock(zone->zone_pgdat, &flags); in offline_pages()
1593 if (!populated_zone(zone)) { in offline_pages()
1594 zone_pcp_reset(zone); in offline_pages()
1597 zone_pcp_update(zone); in offline_pages()
1608 remove_pfn_range_from_zone(zone, start_pfn, nr_pages); in offline_pages()
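The tail of offline_pages() undoes the onlining accounting: the present-page counters drop on both the zone and its node, and a zone emptied of present pages has its per-cpu pagesets torn down. A toy mirror of that arithmetic with stand-in fields:

    #include <stdio.h>

    struct zone { unsigned long present_pages, node_present_pages; int has_pcp; };

    static void offline_accounting(struct zone *z, unsigned long nr_pages)
    {
        z->present_pages -= nr_pages;
        z->node_present_pages -= nr_pages;    /* zone->zone_pgdat in the kernel */

        if (!z->present_pages)
            z->has_pcp = 0;    /* zone_pcp_reset() on a now-empty zone */
        /* otherwise the kernel calls zone_pcp_update() to re-size batches */
    }

    int main(void)
    {
        struct zone z = { 1024, 4096, 1 };

        offline_accounting(&z, 1024);
        printf("present=%lu node=%lu pcp=%d\n",
               z.present_pages, z.node_present_pages, z.has_pcp);
        return 0;
    }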