Lines matching full:zone (one hit per line: the matched source line number, the line itself, and the enclosing function; a trailing argument/local/member tag marks hits where zone is declared rather than merely used)
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
101 * shuffle the whole zone).
121 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
152 struct zone *zone; member
357 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
420 * prev_end_pfn static that contains the end of previous zone in defer_init()
572 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
580 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
581 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
582 sp = zone->spanned_pages; in page_outside_zone_boundaries()
583 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
585 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
588 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
589 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
595 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
597 if (zone != page_zone(page)) in page_is_consistent()
603 * Temporary debugging check for pages not lying within a given zone.
605 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
607 if (page_outside_zone_boundaries(zone, page)) in bad_range()
609 if (!page_is_consistent(zone, page)) in bad_range()
615 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
781 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
794 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
799 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
809 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
812 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
814 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
884 * (d) a page and its buddy are in the same zone.
887 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
901 * zone check is done late to avoid uselessly calculating in page_is_buddy()
902 * zone/node ids for pages that could never merge. in page_is_buddy()
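The page_is_buddy() conditions above rest on the buddy-index arithmetic used throughout the allocator: at order n, a page's buddy is the block whose PFN differs only in bit n, and the merged block starts at the PFN with that bit cleared. A minimal userspace sketch of that relation (the helper names here are illustrative, not the kernel's):

#include <stdio.h>

/* Buddy of a PFN at a given order: flip bit 'order' of the PFN. */
static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);
}

/* PFN the merged block starts at once a page and its buddy combine. */
static unsigned long combined_pfn(unsigned long pfn, unsigned int order)
{
        return pfn & ~(1UL << order);
}

int main(void)
{
        unsigned long pfn = 0x1234;

        for (unsigned int order = 0; order < 4; order++)
                printf("order %u: pfn 0x%lx buddy 0x%lx merged 0x%lx\n",
                       order, pfn, buddy_pfn(pfn, order),
                       combined_pfn(pfn, order));
        return 0;
}

At order 2, for example, 0x1234 and 0x1230 are buddies and merge into the order-3 block starting at 0x1230.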
913 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
920 capc->cc->zone == zone ? capc : NULL; in task_capc()
949 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
963 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
966 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
973 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
976 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
987 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
990 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
995 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
1005 zone->free_area[order].nr_free--; in del_page_from_free_list()
1060 struct zone *zone, unsigned int order, in __free_one_page() argument
1063 struct capture_control *capc = task_capc(zone); in __free_one_page()
1072 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1077 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1080 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1085 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1099 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1101 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1116 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1143 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1145 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
1433 * Assumes all pages on list are in same zone, and of same order.
1436 * If the zone was previously in an "all pages pinned" state then look to
1439 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1442 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1501 * under zone->lock. It is believed the overhead of in free_pcppages_bulk()
1519 spin_lock(&zone->lock); in free_pcppages_bulk()
1520 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1539 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1542 spin_unlock(&zone->lock); in free_pcppages_bulk()
1545 static void free_one_page(struct zone *zone, in free_one_page() argument
1552 spin_lock_irqsave(&zone->lock, flags); in free_one_page()
1553 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1557 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1558 spin_unlock_irqrestore(&zone->lock, flags); in free_one_page()
1562 unsigned long zone, int nid) in __init_single_page() argument
1565 set_page_links(page, zone, nid, pfn); in __init_single_page()
1574 if (!is_highmem_idx(zone)) in __init_single_page()
1592 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1594 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1641 struct zone *zone = page_zone(page); in __free_pages_ok() local
1648 spin_lock_irqsave(&zone->lock, flags); in __free_pages_ok()
1649 if (unlikely(has_isolate_pageblock(zone) || in __free_pages_ok()
1653 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in __free_pages_ok()
1654 spin_unlock_irqrestore(&zone->lock, flags); in __free_pages_ok()
1750 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1757 * belong to a single zone. We assume that a border between node0 and node1
1764 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1779 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1791 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1793 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1797 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1801 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1804 block_end_pfn, zone)) in set_zone_contiguous()
1810 zone->contiguous = true; in set_zone_contiguous()
1813 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1815 zone->contiguous = false; in clear_zone_contiguous()
1902 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1907 int nid = zone_to_nid(zone); in deferred_init_pages()
1909 int zid = zone_idx(zone); in deferred_init_pages()
1928 * This function is meant to pre-load the iterator for the zone init.
1934 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1941 * Start out by walking through the ranges in this zone that have in deferred_init_mem_pfn_range_in_zone()
1945 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1968 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1977 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1984 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1995 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
2016 struct zone *zone = arg; in deferred_init_memmap_chunk() local
2019 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
2026 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
2046 struct zone *zone; in deferred_init_memmap() local
2068 * Once we unlock here, the zone cannot be grown anymore, thus if an in deferred_init_memmap()
2069 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
2074 /* Only the highest zone is deferred so find it */ in deferred_init_memmap()
2076 zone = pgdat->node_zones + zid; in deferred_init_memmap()
2077 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
2081 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_init_memmap()
2082 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2092 .fn_arg = zone, in deferred_init_memmap()
2101 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2105 /* Sanity check that the next zone really is unpopulated */ in deferred_init_memmap()
2106 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
2116 * If this zone has deferred pages, try to grow it by initializing enough
2122 * Return true when zone was grown, otherwise return false. We return true even
2131 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2134 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2140 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2141 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2147 * If someone grew this zone while we were waiting for spinlock, return in deferred_grow_zone()
2155 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_grow_zone()
2156 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2173 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2198 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2200 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2207 struct zone *zone; in page_alloc_init_late() local
2239 for_each_populated_zone(zone) in page_alloc_init_late()
2240 set_zone_contiguous(zone); in page_alloc_init_late()
2289 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2297 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2305 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2308 add_to_free_list(&page[size], zone, high, migratetype); in expand()
2448 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2457 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2461 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2462 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
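When no block of the requested order is free, __rmqueue_smallest() takes one from the next order that has a block and expand() (quoted a few lines above) splits it back down, returning each surplus half to the free lists. A standalone sketch of just that halving loop, with print statements standing in for add_to_free_list():

#include <stdio.h>

/*
 * Split a block of order 'high' down to order 'low', printing the order
 * of each surplus half that would be handed back to a free list.
 */
static void expand_sketch(unsigned int low, unsigned int high)
{
        unsigned long size = 1UL << high;

        while (high > low) {
                high--;
                size >>= 1;
                printf("        put back %lu pages at order %u\n", size, high);
        }
}

int main(void)
{
        printf("serving an order-2 request from an order-5 block:\n");
        expand_sketch(2, 5);
        return 0;
}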
2488 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2491 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2494 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2503 static int move_freepages(struct zone *zone, in move_freepages() argument
2528 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2529 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2532 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2540 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2552 /* Do not cross zone boundaries */ in move_freepages_block()
2553 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2555 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2558 return move_freepages(zone, start_pfn, end_pfn, migratetype, in move_freepages_block()
2606 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
2618 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2621 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2637 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
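boost_watermark() raises the temporary boost by one pageblock per fallback event, capped at roughly watermark_boost_factor/10000 of the high watermark (and at least one pageblock). A worked example of that arithmetic with invented numbers and what is commonly the default factor of 15000:

#include <stdio.h>

int main(void)
{
        /* Invented numbers; watermark_boost_factor commonly defaults to 15000. */
        unsigned long high_wmark = 20000;               /* pages */
        unsigned long pageblock_nr_pages = 512;         /* 2MB pageblocks with 4K pages */
        unsigned long boost_factor = 15000;
        unsigned long boost = 0;

        unsigned long max_boost = high_wmark * boost_factor / 10000;
        if (max_boost < pageblock_nr_pages)
                max_boost = pageblock_nr_pages;

        /* Each fallback steal bumps the boost by one pageblock, capped. */
        for (int i = 0; i < 100; i++) {
                boost += pageblock_nr_pages;
                if (boost > max_boost)
                        boost = max_boost;
        }
        printf("max_boost = %lu pages, boost after many steals = %lu pages\n",
               max_boost, boost);
        return 0;
}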
2651 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2678 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in steal_suitable_fallback()
2679 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2685 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2709 /* moving whole block can fail due to zone boundary conditions */ in steal_suitable_fallback()
2724 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2768 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2775 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. in reserve_highatomic_pageblock()
2778 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2779 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2782 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2785 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2792 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2794 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2798 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
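The max_managed computation quoted above caps the highatomic reserve at about 1% of the zone's managed pages plus one pageblock, so even small zones can reserve a single block. A quick illustration with invented zone and pageblock sizes:

#include <stdio.h>

/* ~1% of the zone plus one pageblock, as in the max_managed line above. */
static unsigned long highatomic_cap(unsigned long managed_pages,
                                    unsigned long pageblock_nr_pages)
{
        return managed_pages / 100 + pageblock_nr_pages;
}

int main(void)
{
        unsigned long pageblock = 512;  /* pages per pageblock; illustrative */
        unsigned long zones[] = { 4096, 262144, 4194304 };      /* 16MB, 1GB, 16GB in 4K pages */

        for (int i = 0; i < 3; i++)
                printf("managed %7lu pages -> cap %6lu pages (%lu pageblocks)\n",
                       zones[i], highatomic_cap(zones[i], pageblock),
                       highatomic_cap(zones[i], pageblock) / pageblock);
        return 0;
}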
2816 struct zone *zone; in unreserve_highatomic_pageblock() local
2821 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2827 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2831 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2833 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2854 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2856 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2869 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2872 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2876 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2893 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2918 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2944 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2960 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2972 * Call me with the zone->lock already held.
2975 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2983 * allocating from CMA when over half of the zone's free memory in __rmqueue()
2987 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
2988 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
2989 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2995 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2998 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
3000 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
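The CMA balance test quoted above tries the MIGRATE_CMA free lists first once CMA holds more than half of the zone's free pages, so that movable allocations drain CMA before exhausting the rest of the zone. A sketch of only that decision (the counters are plain parameters here, not the kernel's vmstat API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Try the CMA free lists before the requested migratetype when the
 * allocation may use CMA and CMA already holds most of the free memory.
 */
static bool try_cma_first(bool alloc_cma_allowed,
                          unsigned long free_cma_pages,
                          unsigned long free_pages)
{
        return alloc_cma_allowed && free_cma_pages > free_pages / 2;
}

int main(void)
{
        printf("%d\n", try_cma_first(true, 30000, 50000));      /* 1: 60% of free is CMA */
        printf("%d\n", try_cma_first(true, 10000, 50000));      /* 0: plenty of non-CMA free */
        printf("%d\n", try_cma_first(false, 30000, 50000));     /* 0: request may not use CMA */
        return 0;
}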
3015 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
3025 spin_lock(&zone->lock); in rmqueue_bulk()
3027 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
3048 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
3058 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
3059 spin_unlock(&zone->lock); in rmqueue_bulk()
3072 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
3081 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
3087 * Drain pcplists of the indicated processor and zone.
3093 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
3100 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in drain_pages_zone()
3102 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
3116 struct zone *zone; in drain_pages() local
3118 for_each_populated_zone(zone) { in drain_pages()
3119 drain_pages_zone(cpu, zone); in drain_pages()
3126 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3127 * the single zone's pages.
3129 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
3133 if (zone) in drain_local_pages()
3134 drain_pages_zone(cpu, zone); in drain_local_pages()
3153 drain_local_pages(drain->zone); in drain_local_pages_wq()
3167 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) in __drain_all_pages() argument
3186 * a zone. Such callers are primarily CMA and memory hotplug and need in __drain_all_pages()
3190 if (!zone) in __drain_all_pages()
3203 struct zone *z; in __drain_all_pages()
3212 } else if (zone) { in __drain_all_pages()
3213 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __drain_all_pages()
3235 drain->zone = zone; in __drain_all_pages()
3248 * When zone parameter is non-NULL, spill just the single zone's pages.
3252 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3254 __drain_all_pages(zone, false); in drain_all_pages()
3264 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3271 if (zone_is_empty(zone)) in mark_free_pages()
3274 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3276 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3277 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3286 if (page_zone(page) != zone) in mark_free_pages()
3295 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3308 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3349 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone) in nr_pcp_high() argument
3356 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) in nr_pcp_high()
3369 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3375 pcp = this_cpu_ptr(zone->per_cpu_pageset); in free_unref_page_commit()
3379 high = nr_pcp_high(pcp, zone); in free_unref_page_commit()
3383 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp); in free_unref_page_commit()
3506 struct zone *zone; in __isolate_free_page() local
3511 zone = page_zone(page); in __isolate_free_page()
3521 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3522 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3525 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3530 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3562 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3564 /* zone lock should be held when this function is called */ in __putback_isolated_page()
3565 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3568 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3577 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, in zone_statistics()
3602 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist() argument
3624 alloced = rmqueue_bulk(zone, order, in __rmqueue_pcplist()
3642 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3643 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
3659 pcp = this_cpu_ptr(zone->per_cpu_pageset); in rmqueue_pcplist()
3662 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3666 zone_statistics(preferred_zone, zone, 1); in rmqueue_pcplist()
3672 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3675 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3676 struct zone *zone, unsigned int order, in rmqueue() argument
3690 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3701 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3712 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3717 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3722 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3724 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue()
3727 zone_statistics(preferred_zone, zone, 1); in rmqueue()
3731 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3732 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3733 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3736 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3740 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue()
3818 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3844 * one free page of a suitable size. Checking now avoids taking the zone lock
3847 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3911 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3918 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3958 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
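In outline, the watermark check above removes pages this request cannot use (the remainder of its own high-order block, plus reserves such as highatomic or CMA pages it is not entitled to), compares what is left against the chosen mark plus the lowmem_reserve protection, and for order > 0 additionally requires a free block of at least the requested order. A simplified standalone version of the free-page arithmetic only (the structure and field names are invented):

#include <stdbool.h>
#include <stdio.h>

struct zone_sketch {
        unsigned long free_pages;
        unsigned long reserved_highatomic;
        unsigned long lowmem_reserve;   /* protection for this request's zone index */
};

/*
 * Usable free pages must stay strictly above mark + lowmem_reserve
 * once this allocation's unusable share is removed.
 */
static bool watermark_ok(const struct zone_sketch *z, unsigned int order,
                         unsigned long mark, bool may_use_reserve)
{
        long unusable = (1L << order) - 1;

        if (!may_use_reserve)
                unusable += z->reserved_highatomic;

        return (long)z->free_pages - unusable > (long)(mark + z->lowmem_reserve);
}

int main(void)
{
        struct zone_sketch z = {
                .free_pages = 1200,
                .reserved_highatomic = 600,
                .lowmem_reserve = 256,
        };

        printf("%d\n", watermark_ok(&z, 0, 400, false));        /* 0: reserve is off-limits */
        printf("%d\n", watermark_ok(&z, 0, 400, true));         /* 1: may dip into the reserve */
        printf("%d\n", watermark_ok(&z, 3, 100, false));        /* 1: 593 usable > 356 needed */
        return 0;
}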
3971 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3973 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3977 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3984 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3985 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3986 * premature use of a lower zone may cause lowmem pressure problems that
3987 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3992 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
4003 if (!zone) in alloc_flags_nofragment()
4006 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
4011 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
4015 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
4043 struct zone *zone; in get_page_from_freelist() local
4049 * Scan zonelist, looking for a zone with enough free. in get_page_from_freelist()
4054 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
4061 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
4083 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
4086 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
4087 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
4093 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
4101 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
4102 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
4108 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
4109 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
4116 * Watermark failed for this zone, but see if we can in get_page_from_freelist()
4117 * grow this zone if it contains deferred pages. in get_page_from_freelist()
4120 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4130 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
4133 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
4143 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
4152 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
4162 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
4167 /* Try again if zone has deferred pages */ in get_page_from_freelist()
4169 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4368 * At least in one zone compaction wasn't deferred or skipped, so let's in __alloc_pages_direct_compact()
4382 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4384 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4385 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4423 * compaction considers all the zone as desperately out of memory in should_compact_retry()
4497 struct zone *zone; in should_compact_retry() local
4509 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4511 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4641 struct zone *zone; in wake_all_kswapds() local
4645 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4647 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4648 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4649 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4752 struct zone *zone; in should_reclaim_retry() local
4781 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4785 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4788 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4789 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4795 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4809 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4916 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
5163 /* Dirty zone balancing only done in the fast path */ in prepare_alloc_pages()
5167 * The preferred zone is used for statistics but crucially it is in prepare_alloc_pages()
5204 struct zone *zone; in __alloc_pages_bulk() local
5255 /* Find an allowed local zone that meets the low watermark. */ in __alloc_pages_bulk()
5256 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { in __alloc_pages_bulk()
5260 !__cpuset_zone_allowed(zone, gfp)) { in __alloc_pages_bulk()
5264 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && in __alloc_pages_bulk()
5265 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { in __alloc_pages_bulk()
5269 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; in __alloc_pages_bulk()
5270 if (zone_watermark_fast(zone, 0, mark, in __alloc_pages_bulk()
5281 if (unlikely(!zone)) in __alloc_pages_bulk()
5286 pcp = this_cpu_ptr(zone->per_cpu_pageset); in __alloc_pages_bulk()
5297 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, in __alloc_pages_bulk()
5317 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); in __alloc_pages_bulk()
5318 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); in __alloc_pages_bulk()
5378 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); in __alloc_pages()
5676 * @offset: The zone index of the highest zone
5679 * high watermark within all zones at or below a given zone index. For each
5680 * zone, the number of pages is calculated as:
5689 struct zone *zone; in nr_free_zone_pages() local
5696 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5697 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5698 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
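So for every zone at or below the given index, the managed pages above the high watermark are summed, and a zone already at or under its high watermark contributes nothing. A standalone rendering of that sum over a few invented zones:

#include <stdio.h>

struct zone_sketch {
        const char *name;
        unsigned long managed;  /* zone_managed_pages() */
        unsigned long high;     /* high_wmark_pages() */
};

int main(void)
{
        /* Invented zones at or below the requested index. */
        struct zone_sketch zones[] = {
                { "DMA32",  262144,  2048 },
                { "Normal", 4000000, 20000 },
                { "Tiny",   1024,    4096 },    /* already under its high watermark */
        };
        unsigned long sum = 0;

        for (int i = 0; i < 3; i++)
                if (zones[i].managed > zones[i].high)
                        sum += zones[i].managed - zones[i].high;

        printf("pages above the high watermarks: %lu\n", sum);
        return 0;
}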
5721 static inline void show_node(struct zone *zone) in show_node() argument
5724 printk("Node %d ", zone_to_nid(zone)); in show_node()
5734 struct zone *zone; in si_mem_available() local
5740 for_each_zone(zone) in si_mem_available()
5741 wmark_low += low_wmark_pages(zone); in si_mem_available()
5802 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5804 if (is_highmem(zone)) { in si_meminfo_node()
5805 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5806 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5881 struct zone *zone; in show_free_areas() local
5884 for_each_populated_zone(zone) { in show_free_areas()
5885 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5889 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; in show_free_areas()
5975 for_each_populated_zone(zone) { in show_free_areas()
5978 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5983 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; in show_free_areas()
5985 show_node(zone); in show_free_areas()
6007 zone->name, in show_free_areas()
6008 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
6009 K(min_wmark_pages(zone)), in show_free_areas()
6010 K(low_wmark_pages(zone)), in show_free_areas()
6011 K(high_wmark_pages(zone)), in show_free_areas()
6012 K(zone->nr_reserved_highatomic), in show_free_areas()
6013 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
6014 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
6015 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
6016 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
6017 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
6018 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
6019 K(zone->present_pages), in show_free_areas()
6020 K(zone_managed_pages(zone)), in show_free_areas()
6021 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
6022 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
6024 K(this_cpu_read(zone->per_cpu_pageset->count)), in show_free_areas()
6025 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
6028 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
6032 for_each_populated_zone(zone) { in show_free_areas()
6037 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
6039 show_node(zone); in show_free_areas()
6040 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
6042 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
6044 struct free_area *area = &zone->free_area[order]; in show_free_areas()
6056 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
6073 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
6075 zoneref->zone = zone; in zoneref_set_zone()
6076 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
6080 * Builds allocation fallback zone lists.
6086 struct zone *zone; in build_zonerefs_node() local
6092 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
6093 if (managed_zone(zone)) { in build_zonerefs_node()
6094 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
6198 * This results in maximum locality--normal zone overflows into local
6199 * DMA zone, if any--but risks exhausting DMA zone.
6217 zonerefs->zone = NULL; in build_zonelists_in_node_order()
6232 zonerefs->zone = NULL; in build_thisnode_zonelists()
6237 * Build zonelists ordered by zone and nodes within zones.
6238 * This results in conserving DMA zone[s] until all Normal memory is
6240 * may still exist in local DMA zone.
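The two comment blocks above contrast the possible fallback orderings: node order favours locality but can exhaust the local node's low zones, while zone order conserves DMA/DMA32 across all nodes at the cost of locality. A toy program that prints both orders for an invented two-node layout (it only illustrates iteration order, not the kernel's zoneref machinery):

#include <stdio.h>

#define NR_NODES 2
#define NR_ZONES 2

int main(void)
{
        const char *zone_names[NR_ZONES] = { "DMA32", "Normal" };
        /* Which zones each node has; node 1 has no DMA32 here. */
        int has_zone[NR_NODES][NR_ZONES] = { { 1, 1 }, { 0, 1 } };

        printf("node order (locality first; may exhaust node 0's DMA32):\n");
        for (int n = 0; n < NR_NODES; n++)
                for (int z = NR_ZONES - 1; z >= 0; z--)
                        if (has_zone[n][z])
                                printf("        node %d %s\n", n, zone_names[z]);

        printf("zone order (conserve low zones across all nodes):\n");
        for (int z = NR_ZONES - 1; z >= 0; z--)
                for (int n = 0; n < NR_NODES; n++)
                        if (has_zone[n][z])
                                printf("        node %d %s\n", n, zone_names[z]);
        return 0;
}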
6278 * I.e., first node id of first zone in arg node's generic zonelist.
6289 return zone_to_nid(z->zone); in local_memory_node()
6330 zonerefs->zone = NULL; in build_zonelists()
6349 * Other parts of the kernel may not check if the zone is available.
6388 * i.e., the node of the first zone in the generic zonelist. in __build_all_zonelists()
6412 * each zone will be allocated later when the per cpu in build_all_zonelists_init()
6450 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
6464 pr_info("Policy zone: %s\n", zone_names[policy_zone]); in build_all_zonelists()
6468 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6470 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6474 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6497 * zone stats (e.g., nr_isolate_pageblock) are touched.
6499 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, in memmap_init_range() argument
6518 if (zone == ZONE_DEVICE) { in memmap_init_range()
6534 if (overlap_memmap_init(zone, &pfn)) in memmap_init_range()
6541 __init_single_page(page, pfn, zone, nid); in memmap_init_range()
6559 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6565 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6567 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6571 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6591 * phase for it to be fully associated with a zone. in memmap_init_zone_device()
6627 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6631 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6632 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6651 * - zone and node links point to zone and node that span the page if the
6652 * hole is in the middle of a zone
6653 * - zone and node links point to adjacent zone/node if the hole falls on
6654 * the zone boundary; the pages in such holes will be prepended to the
6655 * zone/node above the hole except for the trailing pages in the last
6656 * section that will be appended to the zone/node below.
6660 int zone, int node) in init_unavailable_range() argument
6671 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
6677 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", in init_unavailable_range()
6678 node, zone_names[zone], pgcnt); in init_unavailable_range()
6681 static void __init memmap_init_zone_range(struct zone *zone, in memmap_init_zone_range() argument
6686 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
6687 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
6688 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone_range()
6715 struct zone *zone = node->node_zones + j; in memmap_init() local
6717 if (!populated_zone(zone)) in memmap_init()
6720 memmap_init_zone_range(zone, start_pfn, end_pfn, in memmap_init()
6730 * Append the pages in this hole to the highest zone in the last in memmap_init()
6762 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6769 * of the zone or 1MB, whichever is smaller. The batch in zone_batchsize()
6771 * and zone lock contention. in zone_batchsize()
6773 batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE); in zone_batchsize()
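The min() quoted above bounds the pcp batch at roughly 0.1% of the zone's managed pages (managed >> 10) or 1MB worth of pages, whichever is smaller; the function then rounds the result further, which this sketch does not reproduce. What the bound alone gives for a few sizes:

#include <stdio.h>

static unsigned long batch_bound(unsigned long managed_pages,
                                 unsigned long page_size)
{
        unsigned long by_zone = managed_pages >> 10;            /* ~0.1% of the zone */
        unsigned long by_size = (1024UL * 1024) / page_size;    /* 1MB worth of pages */

        return by_zone < by_size ? by_zone : by_size;
}

int main(void)
{
        printf("1GB zone, 4K pages:  %lu\n", batch_bound(262144, 4096));        /* capped by 1MB */
        printf("64MB zone, 4K pages: %lu\n", batch_bound(16384, 4096));         /* capped by zone size */
        printf("1GB zone, 64K pages: %lu\n", batch_bound(16384, 65536));
        return 0;
}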
6810 static int zone_highsize(struct zone *zone, int batch, int cpu_online) in zone_highsize() argument
6819 * By default, the high value of the pcp is based on the zone in zone_highsize()
6823 total_pages = low_wmark_pages(zone); in zone_highsize()
6828 * zone. in zone_highsize()
6830 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; in zone_highsize()
6834 * Split the high value across all online CPUs local to the zone. Note in zone_highsize()
6841 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; in zone_highsize()
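Taken together, the pcp high limit starts from the zone's low watermark (or managed_pages / percpu_pagelist_high_fraction when that sysctl is set) and is split across the CPUs local to the zone's node. A rough standalone rendering of that split with invented numbers:

#include <stdio.h>

static unsigned long pcp_high(unsigned long low_wmark, unsigned long managed,
                              unsigned long high_fraction, unsigned int local_cpus)
{
        unsigned long total = high_fraction ? managed / high_fraction : low_wmark;

        if (!local_cpus)
                local_cpus = 1;
        return total / local_cpus;
}

int main(void)
{
        /* Default: spread the zone's low watermark over 8 local CPUs. */
        printf("default:            pcp->high = %lu pages per CPU\n",
               pcp_high(16384, 4000000, 0, 8));
        /* percpu_pagelist_high_fraction = 8: an eighth of the zone, split 8 ways. */
        printf("high_fraction == 8: pcp->high = %lu pages per CPU\n",
               pcp_high(16384, 4000000, 8, 8));
        return 0;
}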
6902 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, in __zone_set_pageset_high_and_batch() argument
6909 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __zone_set_pageset_high_and_batch()
6916 * zone based on the zone's size.
6918 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) in zone_set_pageset_high_and_batch() argument
6922 new_batch = max(1, zone_batchsize(zone)); in zone_set_pageset_high_and_batch()
6923 new_high = zone_highsize(zone, new_batch, cpu_online); in zone_set_pageset_high_and_batch()
6925 if (zone->pageset_high == new_high && in zone_set_pageset_high_and_batch()
6926 zone->pageset_batch == new_batch) in zone_set_pageset_high_and_batch()
6929 zone->pageset_high = new_high; in zone_set_pageset_high_and_batch()
6930 zone->pageset_batch = new_batch; in zone_set_pageset_high_and_batch()
6932 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); in zone_set_pageset_high_and_batch()
6935 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6941 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); in setup_zone_pageset()
6943 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); in setup_zone_pageset()
6948 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in setup_zone_pageset()
6949 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in setup_zone_pageset()
6953 zone_set_pageset_high_and_batch(zone, 0); in setup_zone_pageset()
6963 struct zone *zone; in setup_per_cpu_pageset() local
6966 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6967 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6988 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6995 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_init()
6996 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_init()
6997 zone->pageset_high = BOOT_PAGESET_HIGH; in zone_pcp_init()
6998 zone->pageset_batch = BOOT_PAGESET_BATCH; in zone_pcp_init()
7000 if (populated_zone(zone)) in zone_pcp_init()
7001 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, in zone_pcp_init()
7002 zone->present_pages, zone_batchsize(zone)); in zone_pcp_init()
7005 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
7009 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
7010 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
7015 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
7018 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
7020 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
7023 zone_init_free_lists(zone); in init_currently_empty_zone()
7024 zone->initialized = 1; in init_currently_empty_zone()
7057 * This finds a zone that can be used for ZONE_MOVABLE pages. The
7059 * increasing memory addresses so that the "highest" populated zone is used
7078 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7082 * is distributed. This helper function adjusts the zone ranges
7084 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7115 * Return the number of pages a zone spans in a node, including holes
7131 /* Get the start and end of the zone */ in zone_spanned_pages_in_node()
7138 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
7142 /* Move the zone boundaries inside the node if necessary */ in zone_spanned_pages_in_node()
7183 /* Return the number of page frames in holes in a zone on a node */
7242 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
7260 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
7262 zone->zone_start_pfn = 0; in calculate_node_totalpages()
7263 zone->spanned_pages = size; in calculate_node_totalpages()
7264 zone->present_pages = real_size; in calculate_node_totalpages()
7266 zone->present_early_pages = real_size; in calculate_node_totalpages()
7280 * Calculate the size of the zone->blockflags rounded to an unsigned long
7299 static void __ref setup_usemap(struct zone *zone) in setup_usemap() argument
7301 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
7302 zone->spanned_pages); in setup_usemap()
7303 zone->pageblock_flags = NULL; in setup_usemap()
7305 zone->pageblock_flags = in setup_usemap()
7307 zone_to_nid(zone)); in setup_usemap()
7308 if (!zone->pageblock_flags) in setup_usemap()
7309 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", in setup_usemap()
7310 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
7314 static inline void setup_usemap(struct zone *zone) {} in setup_usemap() argument
7361 * the zone and SPARSEMEM is in use. If there are holes within the in calc_memmap_size()
7362 * zone, each populated memory region may cost us one or two extra in calc_memmap_size()
7410 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
7413 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
7414 zone_set_nid(zone, nid); in zone_init_internals()
7415 zone->name = zone_names[idx]; in zone_init_internals()
7416 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
7417 spin_lock_init(&zone->lock); in zone_init_internals()
7418 zone_seqlock_init(zone); in zone_init_internals()
7419 zone_pcp_init(zone); in zone_init_internals()
7423 * Set up the zone data structures
7442 * Set up the zone data structures:
7459 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
7462 size = zone->spanned_pages; in free_area_init_core()
7463 freesize = zone->present_pages; in free_area_init_core()
7467 * is used by this zone for memmap. This affects the watermark in free_area_init_core()
7475 pr_debug(" %s zone: %lu pages used for memmap\n", in free_area_init_core()
7478 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", in free_area_init_core()
7485 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); in free_area_init_core()
7500 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
7506 setup_usemap(zone); in free_area_init_core()
7507 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
7529 * The zone's endpoints aren't required to be MAX_ORDER in alloc_node_mem_map()
7677 * Sum pages in active regions for movable zone.
7697 * Find the PFN the Movable zone begins in each node. Kernel memory
7915 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7916 if (populated_zone(zone)) { in check_for_memory()
7936 * free_area_init - Initialise all pg_data_t and zone data
7937 * @max_zone_pfn: an array of max PFNs for each zone
7941 * zone in each node and their holes is calculated. If the maximum PFN
7942 * between two adjacent zones match, it is assumed that the zone is empty.
7944 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7951 int i, nid, zone; in free_area_init() local
7954 /* Record where the zone boundaries are */ in free_area_init()
7965 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7967 zone = i; in free_area_init()
7969 if (zone == ZONE_MOVABLE) in free_area_init()
7972 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
7973 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
7974 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
7983 /* Print out the zone ranges */ in free_area_init()
7984 pr_info("Zone ranges:\n"); in free_area_init()
8001 pr_info("Movable zone start for each node\n"); in free_area_init()
8194 * set_dma_reserve - set the specified number of pages reserved in the first zone
8197 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8198 * In the DMA zone, a significant percentage may be consumed by kernel image
8201 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8211 struct zone *zone; in page_alloc_cpu_dead() local
8233 for_each_populated_zone(zone) in page_alloc_cpu_dead()
8234 zone_pcp_update(zone, 0); in page_alloc_cpu_dead()
8241 struct zone *zone; in page_alloc_cpu_online() local
8243 for_each_populated_zone(zone) in page_alloc_cpu_online()
8244 zone_pcp_update(zone, 1); in page_alloc_cpu_online()
8292 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
8294 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
8296 /* Find valid and maximum lowmem_reserve in the zone */ in calculate_totalreserve_pages()
8298 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
8299 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
8303 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
8318 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
8320 * pages are left in the zone after a successful __alloc_pages().
8329 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
8331 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
8335 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
8340 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
8342 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
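Roughly, a lower zone's protection against an allocation whose highest usable zone is j is the managed pages of the zones above it (accumulated up to j) divided by that zone's sysctl_lowmem_reserve_ratio entry. A worked example with invented zone sizes and ratios that match the usual defaults for the lower zones:

#include <stdio.h>

#define NR 3    /* DMA32, Normal, Movable -- an invented layout */

int main(void)
{
        const char *names[NR] = { "DMA32", "Normal", "Movable" };
        unsigned long managed[NR] = { 250000, 4000000, 1000000 };
        unsigned long ratio[NR] = { 256, 32, 0 };       /* typical defaults for the lower zones */

        for (int i = 0; i < NR - 1; i++) {
                unsigned long upper = 0;

                for (int j = i + 1; j < NR; j++) {
                        upper += managed[j];
                        unsigned long reserve = ratio[i] ? upper / ratio[i] : 0;

                        printf("%s keeps %lu pages off-limits to allocations aimed at %s\n",
                               names[i], reserve, names[j]);
                }
        }
        return 0;
}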
8355 struct zone *zone; in __setup_per_zone_wmarks() local
8359 for_each_zone(zone) { in __setup_per_zone_wmarks()
8360 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
8361 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
8364 for_each_zone(zone) { in __setup_per_zone_wmarks()
8367 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
8368 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
8370 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
8382 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
8384 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
8387 * If it's a lowmem zone, reserve a number of pages in __setup_per_zone_wmarks()
8388 * proportionate to the zone's size. in __setup_per_zone_wmarks()
8390 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
8399 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
8402 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
8403 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
8404 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
8406 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
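For a non-highmem zone, the minimum watermark is the zone's proportional share of the global pages_min, and the quoted mult_frac() line sets the step that separates low and high from it (managed pages scaled by watermark_scale_factor / 10000; recent kernels also floor that step at a quarter of the minimum). A worked example with invented sizes and the default scale factor of 10:

#include <stdio.h>

int main(void)
{
        /* Invented inputs. */
        unsigned long pages_min = 16384;        /* min_free_kbytes expressed in pages */
        unsigned long lowmem_pages = 4250000;   /* all non-highmem managed pages */
        unsigned long managed = 4000000;        /* this zone's managed pages */
        unsigned long scale_factor = 10;        /* default watermark_scale_factor */

        unsigned long min = (unsigned long)((unsigned long long)pages_min * managed /
                                            lowmem_pages);
        unsigned long step = managed * scale_factor / 10000;
        if (step < min / 4)                     /* floor used by recent kernels */
                step = min / 4;

        printf("min  = %lu pages\n", min);
        printf("low  = %lu pages\n", min + step);
        printf("high = %lu pages\n", min + step * 2);
        return 0;
}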
8417 * Ensures that the watermark[min,low,high] values for each zone are set
8422 struct zone *zone; in setup_per_zone_wmarks() local
8433 for_each_zone(zone) in setup_per_zone_wmarks()
8434 zone_pcp_update(zone, 0); in setup_per_zone_wmarks()
8534 struct zone *zone; in setup_min_unmapped_ratio() local
8539 for_each_zone(zone) in setup_min_unmapped_ratio()
8540 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8562 struct zone *zone; in setup_min_slab_ratio() local
8567 for_each_zone(zone) in setup_min_slab_ratio()
8568 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8594 * if in function of the boot time zone sizes.
8613 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8614 * cpu. It is the fraction of total pages in each zone that a hot per cpu
8620 struct zone *zone; in percpu_pagelist_high_fraction_sysctl_handler() local
8643 for_each_populated_zone(zone) in percpu_pagelist_high_fraction_sysctl_handler()
8644 zone_set_pageset_high_and_batch(zone, 0); in percpu_pagelist_high_fraction_sysctl_handler()
8805 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8837 * If the zone is movable and we have ruled out all reserved in has_unmovable_pages()
8841 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8945 /* [start, end) must belong to a single zone. */
8955 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8979 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
9015 * aligned. The PFN range must belong to a single zone.
9035 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
9073 drain_all_pages(cc.zone); in alloc_contig_range()
9103 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
9165 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
9185 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
9190 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
9219 struct zone *zone; in alloc_contig_pages() local
9223 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
9225 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
9227 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
9228 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
9229 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
9231 * We release the zone lock here because in alloc_contig_pages()
9232 * alloc_contig_range() will also lock the zone in alloc_contig_pages()
9237 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
9242 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
9246 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
9267 * The zone indicated has a new number of managed_pages; batch sizes and percpu
9270 void zone_pcp_update(struct zone *zone, int cpu_online) in zone_pcp_update() argument
9273 zone_set_pageset_high_and_batch(zone, cpu_online); in zone_pcp_update()
9278 * Effectively disable pcplists for the zone by setting the high limit to 0
9285 void zone_pcp_disable(struct zone *zone) in zone_pcp_disable() argument
9288 __zone_set_pageset_high_and_batch(zone, 0, 1); in zone_pcp_disable()
9289 __drain_all_pages(zone, true); in zone_pcp_disable()
9292 void zone_pcp_enable(struct zone *zone) in zone_pcp_enable() argument
9294 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); in zone_pcp_enable()
9298 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
9303 if (zone->per_cpu_pageset != &boot_pageset) { in zone_pcp_reset()
9305 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in zone_pcp_reset()
9306 drain_zonestat(zone, pzstats); in zone_pcp_reset()
9308 free_percpu(zone->per_cpu_pageset); in zone_pcp_reset()
9309 free_percpu(zone->per_cpu_zonestats); in zone_pcp_reset()
9310 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_reset()
9311 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_reset()
9317 * All pages in the range must be in a single zone, must not contain holes,
9324 struct zone *zone; in __offline_isolated_pages() local
9329 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
9330 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
9355 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
9358 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
9364 struct zone *zone = page_zone(page); in is_free_buddy_page() local
9369 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
9376 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
9386 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
9405 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
9409 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
9421 struct zone *zone = page_zone(page); in take_page_off_buddy() local
9427 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
9437 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
9438 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
9441 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
9448 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()