Lines matching full:zone (identifier search, mm/page_alloc.c)
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
100 * shuffle the whole zone).
109 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
133 struct zone *zone; member
378 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
439 * prev_end_pfn static that contains the end of previous zone in defer_init()
583 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
591 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
592 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
593 sp = zone->spanned_pages; in page_outside_zone_boundaries()
594 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
596 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
599 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
600 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
606 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
610 if (zone != page_zone(page)) in page_is_consistent()
616 * Temporary debugging check for pages not lying within a given zone.
618 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
620 if (page_outside_zone_boundaries(zone, page)) in bad_range()
622 if (!page_is_consistent(zone, page)) in bad_range()
628 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
758 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
771 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
776 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
786 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
789 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
791 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
807 * (d) a page and its buddy are in the same zone.
810 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
824 * zone check is done late to avoid uselessly calculating in page_is_buddy()
825 * zone/node ids for pages that could never merge. in page_is_buddy()
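Editor's note: the fragments above are from page_is_buddy() and the comment describing when two pages form a buddy pair. The underlying arithmetic, which the kernel implements in __find_buddy_pfn(), is that a 2^order-aligned block's buddy differs from it only in PFN bit `order`, and ANDing the two PFNs yields the merged block's start. A minimal userspace sketch (the PFNs are made up for illustration):

#include <assert.h>
#include <stdio.h>

/* Userspace model of the kernel's __find_buddy_pfn(): the buddy of a
 * 2^order-aligned block is the block whose PFN differs only in bit
 * 'order'. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);
}

int main(void)
{
        /* PFN 8 at order 3 pairs with PFN 0; at order 2 with PFN 12. */
        assert(find_buddy_pfn(8, 3) == 0);
        assert(find_buddy_pfn(8, 2) == 12);
        /* Merging buddies yields the lower of the two PFNs. */
        printf("combined pfn: %lu\n", 8UL & find_buddy_pfn(8, 3));
        return 0;
}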
836 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
843 capc->cc->zone == zone ? capc : NULL; in task_capc()
872 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
886 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
889 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
896 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
899 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
910 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
913 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
918 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
928 zone->free_area[order].nr_free--; in del_page_from_free_list()
987 struct zone *zone, unsigned int order, in __free_one_page() argument
990 struct capture_control *capc = task_capc(zone); in __free_one_page()
999 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1004 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1007 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1012 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1028 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1030 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1045 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1072 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1074 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
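Editor's note: the __free_one_page() fragments implement buddy coalescing: while the freed block's buddy is itself free at the same order, the buddy is taken off its free list (or its guard cleared), the two halves merge, and the scan repeats one order higher. A userspace sketch of that loop, with an array standing in for PageBuddy plus the per-order free lists:

#include <stdio.h>

#define MAX_ORDER 11
#define NPAGES    (1 << MAX_ORDER)

/* free_order[pfn] >= 0 means pfn heads a free block of that order
 * (a stand-in for PageBuddy + buddy_order in the kernel). */
static int free_order[NPAGES];

static void free_one_page(unsigned long pfn, unsigned int order)
{
        while (order < MAX_ORDER - 1) {
                unsigned long buddy = pfn ^ (1UL << order);

                if (free_order[buddy] != (int)order)
                        break;              /* buddy busy or split: stop */
                free_order[buddy] = -1;     /* take buddy off its list   */
                pfn &= buddy;               /* merged block starts lower */
                order++;
        }
        free_order[pfn] = (int)order;       /* place on free list        */
        printf("freed pfn %lu at order %u\n", pfn, order);
}

int main(void)
{
        for (int i = 0; i < NPAGES; i++)
                free_order[i] = -1;
        free_one_page(0, 0);                /* order 0, buddy not free   */
        free_one_page(1, 0);                /* merges with 0 -> order 1  */
        return 0;
}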
1333 * Assumes all pages on list are in same zone, and of same order.
1336 * If the zone was previously in an "all pages pinned" state then look to
1339 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1342 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1392 * under zone->lock. It is believed the overhead of in free_pcppages_bulk()
1403 spin_lock(&zone->lock); in free_pcppages_bulk()
1404 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1418 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); in free_pcppages_bulk()
1421 spin_unlock(&zone->lock); in free_pcppages_bulk()
1424 static void free_one_page(struct zone *zone, in free_one_page() argument
1429 spin_lock(&zone->lock); in free_one_page()
1430 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1434 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1435 spin_unlock(&zone->lock); in free_one_page()
1439 unsigned long zone, int nid) in __init_single_page() argument
1442 set_page_links(page, zone, nid, pfn); in __init_single_page()
1451 if (!is_highmem_idx(zone)) in __init_single_page()
1469 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1471 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1613 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1622 * belong to a single zone. We assume that a border between node0 and node1
1629 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1644 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1656 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1658 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1662 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1666 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1669 block_end_pfn, zone)) in set_zone_contiguous()
1675 zone->contiguous = true; in set_zone_contiguous()
1678 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1680 zone->contiguous = false; in clear_zone_contiguous()
1769 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1774 int nid = zone_to_nid(zone); in deferred_init_pages()
1776 int zid = zone_idx(zone); in deferred_init_pages()
1795 * This function is meant to pre-load the iterator for the zone init.
1801 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1808 * Start out by walking through the ranges in this zone that have in deferred_init_mem_pfn_range_in_zone()
1812 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1835 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1844 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1851 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1862 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
1883 struct zone *zone = arg; in deferred_init_memmap_chunk() local
1886 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
1893 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
1913 struct zone *zone; in deferred_init_memmap() local
1935 * Once we unlock here, the zone cannot be grown anymore, thus if an in deferred_init_memmap()
1936 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
1941 /* Only the highest zone is deferred so find it */ in deferred_init_memmap()
1943 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1944 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1948 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_init_memmap()
1949 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1959 .fn_arg = zone, in deferred_init_memmap()
1968 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
1972 /* Sanity check that the next zone really is unpopulated */ in deferred_init_memmap()
1973 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1983 * If this zone has deferred pages, try to grow it by initializing enough
1989 * Return true when zone was grown, otherwise return false. We return true even
1998 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2001 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2007 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2008 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2014 * If someone grew this zone while we were waiting for spinlock, return in deferred_grow_zone()
2022 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_grow_zone()
2023 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2040 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2065 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2067 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2074 struct zone *zone; in page_alloc_init_late() local
2093 for_each_populated_zone(zone) in page_alloc_init_late()
2094 zone_pcp_update(zone); in page_alloc_init_late()
2112 for_each_populated_zone(zone) in page_alloc_init_late()
2113 set_zone_contiguous(zone); in page_alloc_init_late()
2161 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2169 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2177 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2180 add_to_free_list(&page[size], zone, high, migratetype); in expand()
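Editor's note: expand() is the inverse of the coalescing above. When __rmqueue_smallest() removes a block larger than requested, each unused upper half goes back onto the free list, one per order, until the remaining block matches the request. A sketch under the same userspace model:

#include <stdio.h>

/* Model of expand(): split a block of 2^high pages down to 2^low,
 * handing the upper half back to the free list at each step. */
static void expand(unsigned long pfn, unsigned int low, unsigned int high)
{
        unsigned long size = 1UL << high;

        while (high > low) {
                high--;
                size >>= 1;
                /* the kernel calls add_to_free_list(&page[size], ...) here */
                printf("free half: pfn %lu, order %u\n", pfn + size, high);
        }
        printf("allocated: pfn %lu, order %u\n", pfn, low);
}

int main(void)
{
        expand(0, 0, 3);   /* take an order-3 block, keep one order-0 page */
        return 0;
}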
2308 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2317 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2321 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2322 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2348 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2351 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2354 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2363 static int move_freepages(struct zone *zone, in move_freepages() argument
2392 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2393 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2396 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2404 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2419 /* Do not cross zone boundaries */ in move_freepages_block()
2420 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2422 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2425 return move_freepages(zone, start_page, end_page, migratetype, in move_freepages_block()
2473 static inline void boost_watermark(struct zone *zone) in boost_watermark() argument
2485 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2488 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2504 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
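Editor's note: boost_watermark() raises the zone's temporary boost by one pageblock per fragmentation event, capped at mult_frac(high watermark, watermark_boost_factor, 10000); mult_frac() is the kernel's overflow-avoiding way of computing x * numer / denom. A sketch of the arithmetic: the 15000 default for watermark_boost_factor matches the kernel, while the zone numbers below are made up.

#include <stdio.h>

/* Overflow-safe x * numer / denom, like the kernel's mult_frac(). */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
                               unsigned long denom)
{
        return (x / denom) * numer + (x % denom) * numer / denom;
}

int main(void)
{
        unsigned long high_wmark = 20000;        /* pages; invented zone   */
        unsigned long boost_factor = 15000;      /* default: cap at 150%   */
        unsigned long pageblock_nr_pages = 512;  /* typical x86, 2MB/4KB   */
        unsigned long boost = 0;

        unsigned long max_boost = mult_frac(high_wmark, boost_factor, 10000);
        /* the kernel also ensures the cap is at least one pageblock */
        boost += pageblock_nr_pages;
        if (boost > max_boost)
                boost = max_boost;
        printf("boost %lu pages (cap %lu)\n", boost, max_boost);
        return 0;
}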
2516 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2543 boost_watermark(zone); in steal_suitable_fallback()
2545 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2551 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2575 /* moving whole block can fail due to zone boundary conditions */ in steal_suitable_fallback()
2590 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2634 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2641 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. in reserve_highatomic_pageblock()
2644 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2645 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2648 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2651 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2658 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2660 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2664 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2682 struct zone *zone; in unreserve_highatomic_pageblock() local
2687 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2693 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2697 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2699 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2720 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2722 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2735 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2738 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2742 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2759 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2784 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2810 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2826 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2838 * Call me with the zone->lock already held.
2841 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2849 * allocating from CMA when over half of the zone's free memory in __rmqueue()
2853 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
2854 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
2855 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2861 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2864 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2866 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
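Editor's note: the __rmqueue() fragments encode a CMA balancing heuristic: movable allocations are steered into CMA pageblocks first once CMA holds more than half of the zone's free pages, so CMA space gets consumed before it becomes the only free memory left. That decision, condensed into a sketch:

#include <stdbool.h>
#include <stdio.h>

/* Decision from __rmqueue(): prefer CMA pageblocks for movable
 * allocations when CMA holds over half of the zone's free pages. */
static bool use_cma_first(unsigned long free_cma, unsigned long free_pages)
{
        return free_cma > free_pages / 2;
}

int main(void)
{
        printf("%d\n", use_cma_first(600, 1000));  /* 1: drain CMA first  */
        printf("%d\n", use_cma_first(300, 1000));  /* 0: normal fallback  */
        return 0;
}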
2880 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2886 spin_lock(&zone->lock); in rmqueue_bulk()
2888 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2909 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2919 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2920 spin_unlock(&zone->lock); in rmqueue_bulk()
2933 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2942 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
2948 * Drain pcplists of the indicated processor and zone.
2954 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2961 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
2965 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
2978 struct zone *zone; in drain_pages() local
2980 for_each_populated_zone(zone) { in drain_pages()
2981 drain_pages_zone(cpu, zone); in drain_pages()
2988 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2989 * the single zone's pages.
2991 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
2995 if (zone) in drain_local_pages()
2996 drain_pages_zone(cpu, zone); in drain_local_pages()
3015 drain_local_pages(drain->zone); in drain_local_pages_wq()
3022 * When zone parameter is non-NULL, spill just the single zone's pages.
3026 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3045 * a zone. Such callers are primarily CMA and memory hotplug and need in drain_all_pages()
3049 if (!zone) in drain_all_pages()
3062 struct zone *z; in drain_all_pages()
3065 if (zone) { in drain_all_pages()
3066 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
3088 drain->zone = zone; in drain_all_pages()
3105 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3112 if (zone_is_empty(zone)) in mark_free_pages()
3115 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3117 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3118 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3127 if (page_zone(page) != zone) in mark_free_pages()
3136 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3149 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3167 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3183 free_one_page(zone, page, pfn, 0, migratetype, in free_unref_page_commit()
3190 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3195 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
3277 struct zone *zone; in __isolate_free_page() local
3282 zone = page_zone(page); in __isolate_free_page()
3292 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3293 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3296 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3301 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3333 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3335 /* zone lock should be held when this function is called */ in __putback_isolated_page()
3336 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3339 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3348 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
3371 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
3380 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
3396 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3397 struct zone *zone, gfp_t gfp_flags, in rmqueue_pcplist() argument
3406 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3408 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3411 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3418 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3421 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3422 struct zone *zone, unsigned int order, in rmqueue() argument
3436 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3447 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3458 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3463 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3465 spin_unlock(&zone->lock); in rmqueue()
3468 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3472 zone_statistics(preferred_zone, zone); in rmqueue()
3477 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3478 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3479 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3482 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3564 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3590 * one free page of a suitable size. Checking now avoids taking the zone lock
3593 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3657 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3664 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3704 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
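Editor's note: the core of __zone_watermark_ok() is that, after discounting the pages this request itself would consume, the zone must stay above the watermark plus the lowmem_reserve for the highest zone index the caller may use; for order > 0 a free block of sufficient order must also exist on an eligible free list. A simplified sketch of the first part only (unusable-free accounting beyond the request size and the ALLOC_HIGH/ALLOC_HARDER adjustments are omitted; the numbers are made up):

#include <stdbool.h>
#include <stdio.h>

/* Simplified core of __zone_watermark_ok(): after subtracting this
 * request, free pages must stay above mark + lowmem_reserve for the
 * highest usable zone index. Per-order free list checks omitted. */
static bool zone_watermark_ok(unsigned long free_pages, unsigned int order,
                              unsigned long mark, unsigned long lowmem_reserve)
{
        long usable = (long)free_pages - ((1L << order) - 1);

        return usable > (long)(mark + lowmem_reserve);
}

int main(void)
{
        /* 970 free pages, min watermark 900, lowmem_reserve 64 */
        printf("%d\n", zone_watermark_ok(970, 3, 900, 64));  /* 0: fail */
        printf("%d\n", zone_watermark_ok(970, 0, 900, 64));  /* 1: pass */
        return 0;
}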
3717 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3719 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3723 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3730 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3731 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3732 * premature use of a lower zone may cause lowmem pressure problems that
3733 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3738 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3749 if (!zone) in alloc_flags_nofragment()
3752 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3757 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3761 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3792 struct zone *zone; in get_page_from_freelist() local
3798 * Scan zonelist, looking for a zone with enough free. in get_page_from_freelist()
3803 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3810 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3832 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3835 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3836 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3842 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3850 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3851 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
3857 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
3858 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3865 * Watermark failed for this zone, but see if we can in get_page_from_freelist()
3866 * grow this zone if it contains deferred pages. in get_page_from_freelist()
3869 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3879 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3882 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3892 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3901 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3911 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3916 /* Try again if zone has deferred pages */ in get_page_from_freelist()
3918 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4115 * In at least one zone, compaction wasn't deferred or skipped, so let's in __alloc_pages_direct_compact()
4129 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4131 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4132 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4167 * compaction considers the whole zone as desperately out of memory in should_compact_retry()
4241 struct zone *zone; in should_compact_retry() local
4253 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4255 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4377 struct zone *zone; in wake_all_kswapds() local
4381 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4383 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4384 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4385 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4488 struct zone *zone; in should_reclaim_retry() local
4517 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4521 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4524 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4525 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4531 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4545 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4652 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4899 /* Dirty zone balancing only done in the fast path */ in prepare_alloc_pages()
4903 * The preferred zone is used for statistics but crucially it is in prepare_alloc_pages()
4943 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
5233 * @offset: The zone index of the highest zone
5236 * high watermark within all zones at or below a given zone index. For each
5237 * zone, the number of pages is calculated as:
5246 struct zone *zone; in nr_free_zone_pages() local
5253 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5254 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5255 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
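Editor's note: nr_free_zone_pages() walks the zonelist up to @offset and sums, per zone, managed pages minus the high watermark, clamped at zero: the number of pages allocatable before every zone reaches its high mark. A worked sketch with invented zone sizes:

#include <stdio.h>

struct zone_info {
        unsigned long managed;  /* zone_managed_pages() */
        unsigned long high;     /* high_wmark_pages()   */
};

/* Model of nr_free_zone_pages(): pages allocatable before every
 * zone hits its high watermark. */
static unsigned long nr_free_zone_pages(const struct zone_info *zones, int n)
{
        unsigned long sum = 0;

        for (int i = 0; i < n; i++)
                if (zones[i].managed > zones[i].high)
                        sum += zones[i].managed - zones[i].high;
        return sum;
}

int main(void)
{
        struct zone_info zones[] = {
                { .managed = 4000,    .high = 128 },   /* small DMA-like  */
                { .managed = 1000000, .high = 2048 },  /* normal zone     */
        };
        printf("%lu\n", nr_free_zone_pages(zones, 2));  /* 1001824 */
        return 0;
}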
5278 static inline void show_node(struct zone *zone) in show_node() argument
5281 printk("Node %d ", zone_to_nid(zone)); in show_node()
5291 struct zone *zone; in si_mem_available() local
5297 for_each_zone(zone) in si_mem_available()
5298 wmark_low += low_wmark_pages(zone); in si_mem_available()
5359 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5361 if (is_highmem(zone)) { in si_meminfo_node()
5362 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5363 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5438 struct zone *zone; in show_free_areas() local
5441 for_each_populated_zone(zone) { in show_free_areas()
5442 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5446 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5529 for_each_populated_zone(zone) { in show_free_areas()
5532 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5537 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5539 show_node(zone); in show_free_areas()
5562 zone->name, in show_free_areas()
5563 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5564 K(min_wmark_pages(zone)), in show_free_areas()
5565 K(low_wmark_pages(zone)), in show_free_areas()
5566 K(high_wmark_pages(zone)), in show_free_areas()
5567 K(zone->nr_reserved_highatomic), in show_free_areas()
5568 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5569 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5570 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5571 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5572 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5573 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5574 K(zone->present_pages), in show_free_areas()
5575 K(zone_managed_pages(zone)), in show_free_areas()
5576 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5577 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
5578 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5580 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5581 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5584 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5588 for_each_populated_zone(zone) { in show_free_areas()
5593 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5595 show_node(zone); in show_free_areas()
5596 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5598 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5600 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5612 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5629 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5631 zoneref->zone = zone; in zoneref_set_zone()
5632 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5636 * Builds allocation fallback zone lists.
5642 struct zone *zone; in build_zonerefs_node() local
5648 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5649 if (managed_zone(zone)) { in build_zonerefs_node()
5650 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5754 * This results in maximum locality--normal zone overflows into local
5755 * DMA zone, if any--but risks exhausting DMA zone.
5773 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5788 zonerefs->zone = NULL; in build_thisnode_zonelists()
5793 * Build zonelists ordered by zone and nodes within zones.
5794 * This results in conserving DMA zone[s] until all Normal memory is
5796 * may still exist in local DMA zone.
5834 * I.e., first node id of first zone in arg node's generic zonelist.
5845 return zone_to_nid(z->zone); in local_memory_node()
5886 zonerefs->zone = NULL; in build_zonelists()
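Editor's note: the build_zonelists*() fragments construct each node's fallback list. In node order, the local node's populated zones come first, highest zone type downward, then the other nodes by increasing distance; the comments above spell out the trade-off against zone order (locality versus protecting lowmem/DMA). A toy sketch for two nodes and two zone types; the node layout and names are invented for illustration:

#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

static const char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal" };

/* Node-ordered fallback: for each node (nearest first), append its
 * zones from highest type down, as build_zonelists() does. */
static void build_zonelist_node_order(const int *node_order, int nr_nodes)
{
        for (int i = 0; i < nr_nodes; i++)
                for (int zt = MAX_NR_ZONES - 1; zt >= 0; zt--)
                        printf("node %d / %s\n", node_order[i], zone_names[zt]);
}

int main(void)
{
        int order_for_node0[] = { 0, 1 };  /* self first, then by distance */

        build_zonelist_node_order(order_for_node0, 2);
        return 0;
}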
5905 * Other parts of the kernel may not check if the zone is available.
5940 * i.e., the node of the first zone in the generic zonelist. in __build_all_zonelists()
5964 * each zone will be allocated later when the per cpu in build_all_zonelists_init()
6002 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
6016 pr_info("Policy zone: %s\n", zone_names[policy_zone]); in build_all_zonelists()
6020 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6022 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6026 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6049 * zone stats (e.g., nr_isolate_pageblock) are touched.
6051 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
6070 if (zone == ZONE_DEVICE) { in memmap_init_zone()
6086 if (overlap_memmap_init(zone, &pfn)) in memmap_init_zone()
6093 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
6111 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6117 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6119 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6123 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6143 * phase for it to be fully associated with a zone. in memmap_init_zone_device()
6179 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6183 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6184 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6189 unsigned long zone, in memmap_init() argument
6202 memmap_init_zone(size, nid, zone, start_pfn, in memmap_init()
6208 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6215 * size of the zone. in zone_batchsize()
6217 batch = zone_managed_pages(zone) / 1024; in zone_batchsize()
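Editor's note: zone_batchsize() sizes the per-CPU batch at roughly 1/1024 of the zone, caps it at 1MB worth of pages, quarters it, and finally clamps it to a 2^n - 1 value, which was found to alias better in caches than exact powers of two. A userspace sketch of the calculation; the constants follow the kernel source excerpted above, but verify them against your tree:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round down to a power of two (kernel: rounddown_pow_of_two()). */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
        while (n & (n - 1))
                n &= n - 1;
        return n;
}

/* Model of zone_batchsize(): ~1/1024 of the zone, at most 1MB worth,
 * quartered, then clamped to a 2^n - 1 value. */
static int zone_batchsize(unsigned long managed_pages)
{
        unsigned long batch = managed_pages / 1024;

        if (batch * PAGE_SIZE > 1024 * 1024)
                batch = (1024 * 1024) / PAGE_SIZE;
        batch /= 4;
        if (batch < 1)
                batch = 1;
        return (int)(rounddown_pow_of_two(batch + batch / 2) - 1);
}

int main(void)
{
        /* 4GB zone: 1048576 pages -> batch 63 */
        printf("%d\n", zone_batchsize(1048576));
        return 0;
}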
6322 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
6327 (zone_managed_pages(zone) / in pageset_set_high_and_batch()
6330 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
6333 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
6335 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6338 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
6341 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6344 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6346 zone_pageset_init(zone, cpu); in setup_zone_pageset()
6356 struct zone *zone; in setup_per_cpu_pageset() local
6359 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6360 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6381 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6388 zone->pageset = &boot_pageset; in zone_pcp_init()
6390 if (populated_zone(zone)) in zone_pcp_init()
6391 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", in zone_pcp_init()
6392 zone->name, zone->present_pages, in zone_pcp_init()
6393 zone_batchsize(zone)); in zone_pcp_init()
6396 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
6400 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6401 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
6406 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6409 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
6411 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
6414 zone_init_free_lists(zone); in init_currently_empty_zone()
6415 zone->initialized = 1; in init_currently_empty_zone()
6448 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6450 * increasing memory addresses so that the "highest" populated zone is used
6469 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6473 * is distributed. This helper function adjusts the zone ranges
6475 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6506 * Return the number of pages a zone spans in a node, including holes
6522 /* Get the start and end of the zone */ in zone_spanned_pages_in_node()
6529 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
6533 /* Move the zone boundaries inside the node if necessary */ in zone_spanned_pages_in_node()
6574 /* Return the number of page frames in holes in a zone on a node */
6633 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6651 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6653 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6654 zone->spanned_pages = size; in calculate_node_totalpages()
6655 zone->present_pages = real_size; in calculate_node_totalpages()
6669 * Calculate the size of the zone->blockflags rounded to an unsigned long
6689 struct zone *zone, in setup_usemap() argument
6694 zone->pageblock_flags = NULL; in setup_usemap()
6696 zone->pageblock_flags = in setup_usemap()
6699 if (!zone->pageblock_flags) in setup_usemap()
6700 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", in setup_usemap()
6701 usemapsize, zone->name, pgdat->node_id); in setup_usemap()
6705 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
6753 * the zone and SPARSEMEM is in use. If there are holes within the in calc_memmap_size()
6754 * zone, each populated memory region may cost us one or two extra in calc_memmap_size()
6803 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
6806 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
6807 zone_set_nid(zone, nid); in zone_init_internals()
6808 zone->name = zone_names[idx]; in zone_init_internals()
6809 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
6810 spin_lock_init(&zone->lock); in zone_init_internals()
6811 zone_seqlock_init(zone); in zone_init_internals()
6812 zone_pcp_init(zone); in zone_init_internals()
6816 * Set up the zone data structures
6835 * Set up the zone data structures:
6852 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
6854 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
6856 size = zone->spanned_pages; in free_area_init_core()
6857 freesize = zone->present_pages; in free_area_init_core()
6861 * is used by this zone for memmap. This affects the watermark in free_area_init_core()
6870 " %s zone: %lu pages used for memmap\n", in free_area_init_core()
6873 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", in free_area_init_core()
6880 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", in free_area_init_core()
6896 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
6902 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
6903 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
6926 * The zone's endpoints aren't required to be MAX_ORDER in alloc_node_mem_map()
7015 * Use a fake node/zone (0) for now. Some of these pages in init_unavailable_range()
7156 * Sum pages in active regions for movable zone.
7176 * Find the PFN the Movable zone begins in each node. Kernel memory
7394 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7395 if (populated_zone(zone)) { in check_for_memory()
7415 * free_area_init - Initialise all pg_data_t and zone data
7416 * @max_zone_pfn: an array of max PFNs for each zone
7420 * zone in each node and their holes is calculated. If the maximum PFN
7421 * between two adjacent zones match, it is assumed that the zone is empty.
7423 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7430 int i, nid, zone; in free_area_init() local
7433 /* Record where the zone boundaries are */ in free_area_init()
7444 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7446 zone = i; in free_area_init()
7448 if (zone == ZONE_MOVABLE) in free_area_init()
7451 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
7452 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
7453 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
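Editor's note: the loop above derives each zone's [lowest, highest) PFN range from max_zone_pfn[]: a zone starts where the previous one ended, so equal adjacent maxima produce an empty zone, and ZONE_MOVABLE is skipped here to be carved out later. A sketch of the derivation with two invented zones (the descending-order and ZONE_MOVABLE handling are omitted):

#include <stdio.h>

enum { ZONE_DMA32, ZONE_NORMAL, MAX_NR_ZONES };

int main(void)
{
        unsigned long max_zone_pfn[MAX_NR_ZONES] = {
                [ZONE_DMA32]  = 1UL << 20,   /* first 4GB with 4K pages */
                [ZONE_NORMAL] = 1UL << 22,   /* up to 16GB              */
        };
        unsigned long start_pfn = 0, end_pfn;

        for (int zone = 0; zone < MAX_NR_ZONES; zone++) {
                /* a zone whose max does not exceed start_pfn is empty */
                end_pfn = max_zone_pfn[zone] > start_pfn ?
                          max_zone_pfn[zone] : start_pfn;
                printf("zone %d: [%lu, %lu)%s\n", zone, start_pfn, end_pfn,
                       start_pfn == end_pfn ? " (empty)" : "");
                start_pfn = end_pfn;
        }
        return 0;
}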
7462 /* Print out the zone ranges */ in free_area_init()
7463 pr_info("Zone ranges:\n"); in free_area_init()
7480 pr_info("Movable zone start for each node\n"); in free_area_init()
7678 * set_dma_reserve - set the specified number of pages reserved in the first zone
7681 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7682 * In the DMA zone, a significant percentage may be consumed by kernel image
7685 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7761 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
7763 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
7765 /* Find valid and maximum lowmem_reserve in the zone */ in calculate_totalreserve_pages()
7767 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7768 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7772 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
7787 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
7789 * pages are left in the zone after a successful __alloc_pages().
7798 struct zone *zone = pgdat->node_zones + j; in setup_per_zone_lowmem_reserve() local
7799 unsigned long managed_pages = zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
7801 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
7805 struct zone *lower_zone; in setup_per_zone_lowmem_reserve()
7831 struct zone *zone; in __setup_per_zone_wmarks() local
7835 for_each_zone(zone) { in __setup_per_zone_wmarks()
7836 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
7837 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
7840 for_each_zone(zone) { in __setup_per_zone_wmarks()
7843 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
7844 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
7846 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
7858 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
7860 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
7863 * If it's a lowmem zone, reserve a number of pages in __setup_per_zone_wmarks()
7864 * proportionate to the zone's size. in __setup_per_zone_wmarks()
7866 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
7875 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
7878 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
7879 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
7880 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
7882 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
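Editor's note: __setup_per_zone_wmarks() gives each lowmem zone a share of pages_min proportional to its managed pages, then places the low and high marks one and two steps above min, where the step is the larger of min/4 and managed pages scaled by watermark_scale_factor/10000. A worked sketch; the scale-factor default of 10 matches the kernel, while the zone sizes are made up:

#include <stdio.h>

static unsigned long mult_frac(unsigned long x, unsigned long n,
                               unsigned long d)
{
        return (x / d) * n + (x % d) * n / d;
}

int main(void)
{
        unsigned long pages_min = 16384;      /* min_free_kbytes in pages  */
        unsigned long lowmem_pages = 1048576; /* sum of !highmem managed   */
        unsigned long managed = 1048576;      /* this zone's managed pages */
        unsigned long scale_factor = 10;      /* watermark_scale_factor    */

        /* proportional share of the global minimum */
        unsigned long min = (unsigned long)
                ((unsigned long long)pages_min * managed / lowmem_pages);
        unsigned long tmp = min / 4;
        unsigned long scaled = mult_frac(managed, scale_factor, 10000);

        if (scaled > tmp)
                tmp = scaled;

        /* min=16384 low=20480 high=24576 for these inputs */
        printf("min=%lu low=%lu high=%lu\n", min, min + tmp, min + tmp * 2);
        return 0;
}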
7893 * Ensures that the watermark[min,low,high] values for each zone are set
8002 struct zone *zone; in setup_min_unmapped_ratio() local
8007 for_each_zone(zone) in setup_min_unmapped_ratio()
8008 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8030 struct zone *zone; in setup_min_slab_ratio() local
8035 for_each_zone(zone) in setup_min_slab_ratio()
8036 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8062 * if it is a function of the boot-time zone sizes.
8080 static void __zone_pcp_update(struct zone *zone) in __zone_pcp_update() argument
8085 pageset_set_high_and_batch(zone, in __zone_pcp_update()
8086 per_cpu_ptr(zone->pageset, cpu)); in __zone_pcp_update()
8090 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8091 * cpu. It is the fraction of total pages in each zone that a hot per cpu
8097 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
8120 for_each_populated_zone(zone) in percpu_pagelist_fraction_sysctl_handler()
8121 __zone_pcp_update(zone); in percpu_pagelist_fraction_sysctl_handler()
8280 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8315 * If the zone is movable and we have ruled out all reserved in has_unmovable_pages()
8319 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8402 /* [start, end) must belong to a single zone. */
8412 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8437 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8462 * aligned. The PFN range must belong to a single zone.
8482 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
8548 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
8614 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
8640 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
8645 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
8674 struct zone *zone; in alloc_contig_pages() local
8678 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
8680 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8682 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
8683 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
8684 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
8686 * We release the zone lock here because in alloc_contig_pages()
8687 * alloc_contig_range() will also lock the zone in alloc_contig_pages()
8692 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8697 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8701 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8722 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8725 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
8728 __zone_pcp_update(zone); in zone_pcp_update()
8732 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
8740 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8742 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8743 drain_zonestat(zone, pset); in zone_pcp_reset()
8745 free_percpu(zone->pageset); in zone_pcp_reset()
8746 zone->pageset = &boot_pageset; in zone_pcp_reset()
8753 * All pages in the range must be in a single zone, must not contain holes,
8760 struct zone *zone; in __offline_isolated_pages() local
8765 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
8766 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8791 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
8794 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8800 struct zone *zone = page_zone(page); in is_free_buddy_page() local
8805 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8812 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
8822 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
8841 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
8845 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
8857 struct zone *zone = page_zone(page); in take_page_off_buddy() local
8863 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
8873 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
8874 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
8882 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
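Editor's note: take_page_off_buddy() pulls one specific page out of a larger free block: the block leaves its free list, then break_down_buddy_pages() halves it repeatedly, re-freeing whichever half does not contain the target, until the target stands alone at order 0. A sketch reusing the earlier userspace buddy model:

#include <stdio.h>

/* Model of break_down_buddy_pages(): split a free block of 2^high
 * pages so that 'target' ends up alone, re-freeing the half that
 * does not contain it at each step. */
static void break_down_buddy_pages(unsigned long block_pfn,
                                   unsigned long target, unsigned int high)
{
        while (high > 0) {
                high--;
                unsigned long upper = block_pfn + (1UL << high);

                if (target >= upper) {          /* target in upper half */
                        printf("free pfn %lu order %u\n", block_pfn, high);
                        block_pfn = upper;
                } else {                        /* target in lower half */
                        printf("free pfn %lu order %u\n", upper, high);
                }
        }
        printf("isolated pfn %lu\n", block_pfn);
}

int main(void)
{
        break_down_buddy_pages(0, 5, 3);   /* pull page 5 out of [0,8) */
        return 0;
}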