Lines matching refs: zone — every use of the identifier 'zone' in the kernel page allocator (mm/page_alloc.c); entries are tagged 'argument' or 'local' where 'zone' names a function parameter or local variable.

639 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
647 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
648 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
649 sp = zone->spanned_pages; in page_outside_zone_boundaries()
650 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
652 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
656 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
662 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
664 if (zone != page_zone(page)) in page_is_consistent()
672 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
674 if (page_outside_zone_boundaries(zone, page)) in bad_range()
676 if (!page_is_consistent(zone, page)) in bad_range()
682 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
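
For orientation: page_outside_zone_boundaries() above samples the zone span under zone_span_seqbegin()/zone_span_seqretry() so a concurrent zone resize cannot hand it a torn start_pfn/spanned_pages pair. A minimal userspace sketch of that read-retry pattern, assuming C11 atomics in place of the kernel seqlock (struct span, pfn_in_span(), and the simplified payload handling are all illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct span {
        atomic_uint seq;              /* odd while a resize is in flight */
        unsigned long start_pfn;
        unsigned long spanned_pages;
    };

    static int pfn_in_span(struct span *s, unsigned long pfn)
    {
        unsigned int seq;
        unsigned long start, pages;

        do {
            /* wait out any in-progress writer (odd sequence) */
            while ((seq = atomic_load_explicit(&s->seq,
                                               memory_order_acquire)) & 1)
                ;
            start = s->start_pfn;
            pages = s->spanned_pages;
            atomic_thread_fence(memory_order_acquire);
            /* retry if a writer bumped the sequence meanwhile */
        } while (atomic_load_explicit(&s->seq, memory_order_relaxed) != seq);

        return pfn >= start && pfn < start + pages;
    }

    int main(void)
    {
        struct span s = { .start_pfn = 4096, .spanned_pages = 1024 };

        printf("pfn 5000 in span: %d\n", pfn_in_span(&s, 5000));
        return 0;
    }
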
864 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
878 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
883 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
893 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
896 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
898 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
967 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
974 capc->cc->zone == zone ? capc : NULL; in task_capc()
1003 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
1017 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
1020 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
1027 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
1030 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
1041 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
1044 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
1049 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
1059 zone->free_area[order].nr_free--; in del_page_from_free_list()
1113 struct zone *zone, unsigned int order, in __free_one_page() argument
1116 struct capture_control *capc = task_capc(zone); in __free_one_page()
1122 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1127 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1130 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1134 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1163 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1165 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1183 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1185 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
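
The __free_one_page() entries above drive buddy coalescing: a 2^order block's buddy differs from it only in PFN bit 'order', so the merge loop can find it with an XOR (the kernel's helper for this is __find_buddy_pfn()). A small userspace sketch of that index math:

    #include <stdio.h>

    /* A block of 2^order pages has exactly one same-sized buddy;
     * their PFNs differ only in bit 'order'. */
    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn ^ (1UL << order);
    }

    int main(void)
    {
        unsigned long pfn = 8;  /* freeing PFN 8 at order 0 */

        for (unsigned int order = 0; order < 4; order++) {
            printf("order %u: buddy of pfn %lu is %lu\n",
                   order, pfn, find_buddy_pfn(pfn, order));
            /* after a merge, the combined block starts at the lower PFN */
            pfn &= ~(1UL << order);
        }
        return 0;
    }

Merging repeats until the buddy is not free (or guard pages / the maximum order stop it), and the result lands on a free list at its final order via the add_to_free_list*() calls above.
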
1208 struct zone *zone = page_zone(free_page); in split_free_page() local
1219 spin_lock_irqsave(&zone->lock, flags); in split_free_page()
1228 __mod_zone_freepage_state(zone, -(1UL << order), mt); in split_free_page()
1230 del_page_from_free_list(free_page, zone, order); in split_free_page()
1238 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order, in split_free_page()
1247 spin_unlock_irqrestore(&zone->lock, flags); in split_free_page()
1546 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1566 spin_lock(&zone->lock); in free_pcppages_bulk()
1567 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1609 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1614 spin_unlock(&zone->lock); in free_pcppages_bulk()
1617 static void free_one_page(struct zone *zone, in free_one_page() argument
1624 spin_lock_irqsave(&zone->lock, flags); in free_one_page()
1625 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1629 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1630 spin_unlock_irqrestore(&zone->lock, flags); in free_one_page()
1634 unsigned long zone, int nid) in __init_single_page() argument
1637 set_page_links(page, zone, nid, pfn); in __init_single_page()
1646 if (!is_highmem_idx(zone)) in __init_single_page()
1664 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1666 if (zone_spans_pfn(zone, pfn)) in init_reserved_page()
1713 struct zone *zone = page_zone(page); in __free_pages_ok() local
1720 spin_lock_irqsave(&zone->lock, flags); in __free_pages_ok()
1721 if (unlikely(has_isolate_pageblock(zone) || in __free_pages_ok()
1725 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in __free_pages_ok()
1726 spin_unlock_irqrestore(&zone->lock, flags); in __free_pages_ok()
1840 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1855 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1867 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1869 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1873 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1877 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1880 block_end_pfn, zone)) in set_zone_contiguous()
1886 zone->contiguous = true; in set_zone_contiguous()
1889 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1891 zone->contiguous = false; in clear_zone_contiguous()
1972 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1976 int nid = zone_to_nid(zone); in deferred_init_pages()
1978 int zid = zone_idx(zone); in deferred_init_pages()
2003 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
2014 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
2037 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
2046 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
2053 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
2064 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
2085 struct zone *zone = arg; in deferred_init_memmap_chunk() local
2088 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
2095 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
2115 struct zone *zone; in deferred_init_memmap() local
2145 zone = pgdat->node_zones + zid; in deferred_init_memmap()
2146 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
2151 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2161 .fn_arg = zone, in deferred_init_memmap()
2170 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2175 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
2200 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2203 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2210 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2225 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2242 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2267 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2269 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2276 struct zone *zone; in page_alloc_init_late() local
2308 for_each_populated_zone(zone) in page_alloc_init_late()
2309 set_zone_contiguous(zone); in page_alloc_init_late()
2347 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2355 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2363 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2366 add_to_free_list(&page[size], zone, high, migratetype); in expand()
2561 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2570 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2574 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2575 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
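
__rmqueue_smallest() above pulls the smallest free block that can satisfy the request and lets expand() park every unused half on a lower-order list. A hedged userspace model of that split loop (expand_model() is an illustrative name, not kernel API):

    #include <stdio.h>

    /* Carve a 2^high block down to 2^low, returning each trailing
     * half to successively lower orders, as expand() does. */
    static void expand_model(unsigned long pfn, unsigned int high,
                             unsigned int low)
    {
        unsigned long size = 1UL << high;

        while (high > low) {
            high--;
            size >>= 1;
            /* the kernel adds [pfn + size, pfn + 2*size) to the
             * order-'high' free list here (or makes it a guard page) */
            printf("free block: pfn %lu, order %u\n", pfn + size, high);
        }
        /* [pfn, pfn + 2^low) is what the caller allocates */
    }

    int main(void)
    {
        expand_model(0, 3, 0);  /* order-3 block serving an order-0 request */
        return 0;
    }
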
2600 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2603 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2606 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2615 static int move_freepages(struct zone *zone, in move_freepages() argument
2640 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2641 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2644 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2652 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2665 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2667 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2670 return move_freepages(zone, start_pfn, end_pfn, migratetype, in move_freepages_block()
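
move_freepages_block() above first snaps the PFN to pageblock boundaries, which is plain power-of-two masking. A sketch with pageblock_nr_pages hardcoded to 512 (2 MiB of 4 KiB pages, a typical x86-64 value) purely for illustration:

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL  /* illustrative; really pageblock_nr_pages */

    static unsigned long pageblock_start(unsigned long pfn)
    {
        return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
    }

    int main(void)
    {
        unsigned long pfn = 1234;
        unsigned long start = pageblock_start(pfn);

        printf("pfn %lu -> pageblock [%lu, %lu)\n",
               pfn, start, start + PAGEBLOCK_NR_PAGES);
        return 0;
    }

Both rounded endpoints are then checked with zone_spans_pfn(), as the lines above show, since a pageblock can straddle a zone boundary.
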
2718 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
2730 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2733 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2749 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
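
boost_watermark() above caps the boost with mult_frac(), which computes x * numer / denom without overflowing the intermediate product; watermark_boost_factor is in units of 1/10000 (the default is 15000, i.e. 150% of WMARK_HIGH). A userspace rendering of that helper:

    #include <stdio.h>

    /* x * numer / denom with no overflow in the intermediate product,
     * in the style of the kernel's mult_frac() macro. */
    static unsigned long mult_frac_ul(unsigned long x, unsigned long numer,
                                      unsigned long denom)
    {
        unsigned long quot = x / denom;
        unsigned long rem  = x % denom;

        return quot * numer + (rem * numer) / denom;
    }

    int main(void)
    {
        /* max_boost = high_wmark * watermark_boost_factor / 10000 */
        printf("boost cap: %lu pages\n", mult_frac_ul(20000, 15000, 10000));
        return 0;
    }
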
2763 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2790 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in steal_suitable_fallback()
2791 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2797 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2836 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2880 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2890 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2891 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2894 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2897 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2904 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2906 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2910 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2928 struct zone *zone; in unreserve_highatomic_pageblock() local
2933 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2939 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2943 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2945 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2966 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2968 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2981 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2984 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2988 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
3005 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
3030 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
3056 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
3072 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
3087 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
3099 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
3100 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
3101 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
3107 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
3110 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
3112 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
3124 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
3131 spin_lock(&zone->lock); in rmqueue_bulk()
3133 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
3154 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
3164 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
3165 spin_unlock(&zone->lock); in rmqueue_bulk()
3175 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
3190 free_pcppages_bulk(zone, to_drain, pcp, 0); in drain_zone_pages()
3199 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
3203 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in drain_pages_zone()
3209 free_pcppages_bulk(zone, pcp->count, pcp, 0); in drain_pages_zone()
3219 struct zone *zone; in drain_pages() local
3221 for_each_populated_zone(zone) { in drain_pages()
3222 drain_pages_zone(cpu, zone); in drain_pages()
3229 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
3233 if (zone) in drain_local_pages()
3234 drain_pages_zone(cpu, zone); in drain_local_pages()
3249 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) in __drain_all_pages() argument
3265 if (!zone) in __drain_all_pages()
3278 struct zone *z; in __drain_all_pages()
3287 } else if (zone) { in __drain_all_pages()
3288 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __drain_all_pages()
3308 if (zone) in __drain_all_pages()
3309 drain_pages_zone(cpu, zone); in __drain_all_pages()
3322 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3324 __drain_all_pages(zone, false); in drain_all_pages()
3334 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3341 if (zone_is_empty(zone)) in mark_free_pages()
3344 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3346 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3347 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3356 if (page_zone(page) != zone) in mark_free_pages()
3365 &zone->free_area[order].free_list[t], buddy_list) { in mark_free_pages()
3378 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3424 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, in nr_pcp_high() argument
3432 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) in nr_pcp_high()
3442 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, in free_unref_page_commit() argument
3463 high = nr_pcp_high(pcp, zone, free_high); in free_unref_page_commit()
3467 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex); in free_unref_page_commit()
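
free_unref_page_commit() above stashes the freed page on the per-CPU list and, once pcp->count crosses the high mark computed by nr_pcp_high(), trims a batch back to the zone's buddy lists via free_pcppages_bulk(). A toy model of that threshold behavior (the struct and function names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct pcp_model {
        int count;   /* pages currently cached on this CPU */
        int high;    /* trim threshold */
        int batch;   /* trim/refill granularity */
    };

    static void pcp_free_one(struct pcp_model *pcp)
    {
        pcp->count++;
        if (pcp->count >= pcp->high) {
            /* the kernel's nr_pcp_free() picks the trim size from
             * high and batch; this model just drops one batch */
            printf("trim: %d pages back to the buddy allocator\n",
                   pcp->batch);
            pcp->count -= pcp->batch;
        }
    }

    int main(void)
    {
        struct pcp_model pcp = { .count = 0, .high = 8, .batch = 4 };

        for (int i = 0; i < 20; i++)
            pcp_free_one(&pcp);
        return 0;
    }
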
3479 struct zone *zone; in free_unref_page() local
3502 zone = page_zone(page); in free_unref_page()
3504 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); in free_unref_page()
3506 free_unref_page_commit(zone, pcp, page, migratetype, order); in free_unref_page()
3509 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); in free_unref_page()
3521 struct zone *locked_zone = NULL; in free_unref_page_list()
3547 struct zone *zone = page_zone(page); in free_unref_page_list() local
3550 if (zone != locked_zone) { in free_unref_page_list()
3554 locked_zone = zone; in free_unref_page_list()
3567 free_unref_page_commit(zone, pcp, page, migratetype, 0); in free_unref_page_list()
3608 struct zone *zone = page_zone(page); in __isolate_free_page() local
3619 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3620 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3623 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3626 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3660 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3663 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3666 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3673 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, in zone_statistics()
3697 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, in rmqueue_buddy() argument
3706 spin_lock_irqsave(&zone->lock, flags); in rmqueue_buddy()
3714 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
3716 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue_buddy()
3718 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_buddy()
3722 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue_buddy()
3724 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_buddy()
3728 zone_statistics(preferred_zone, zone, 1); in rmqueue_buddy()
3735 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist() argument
3757 alloced = rmqueue_bulk(zone, order, in __rmqueue_pcplist()
3775 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3776 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
3790 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); in rmqueue_pcplist()
3803 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3808 zone_statistics(preferred_zone, zone, 1); in rmqueue_pcplist()
3826 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3827 struct zone *zone, unsigned int order, in rmqueue() argument
3846 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3853 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
3858 if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { in rmqueue()
3859 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3860 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3863 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3947 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3976 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
4040 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
4047 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
4091 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
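
The watermark checks above are two-step: an order-0 headroom test against the mark plus the lowmem reserve, then, for high-order requests, a scan of the free areas for any block of sufficient order. A userspace model that elides the migratetype and ALLOC_* flag handling:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_ORDERS 11   /* MAX_ORDER in this era of the kernel */

    static bool watermark_ok_model(long free_pages, long mark,
                                   long lowmem_reserve, unsigned int order,
                                   const unsigned long nr_free[NR_ORDERS])
    {
        if (free_pages <= mark + lowmem_reserve)
            return false;              /* not enough order-0 headroom */
        if (!order)
            return true;

        for (unsigned int o = order; o < NR_ORDERS; o++)
            if (nr_free[o])
                return true;           /* a big-enough block exists */
        return false;                  /* free pages, but too fragmented */
    }

    int main(void)
    {
        unsigned long nr_free[NR_ORDERS] = { [0] = 500 };  /* order-0 only */

        printf("order-0 ok: %d\n",
               watermark_ok_model(500, 100, 50, 0, nr_free));
        printf("order-3 ok: %d\n",
               watermark_ok_model(500, 100, 50, 3, nr_free));
        return 0;
    }
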
4106 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
4108 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
4112 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
4127 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
4138 if (!zone) in alloc_flags_nofragment()
4141 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
4150 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
4178 struct zone *zone; in get_page_from_freelist() local
4190 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
4197 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
4219 if (last_pgdat != zone->zone_pgdat) { in get_page_from_freelist()
4220 last_pgdat = zone->zone_pgdat; in get_page_from_freelist()
4221 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); in get_page_from_freelist()
4229 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
4237 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
4238 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
4244 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
4245 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
4256 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4266 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
4269 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
4279 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
4288 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
4298 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
4305 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4523 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4525 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4526 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4638 struct zone *zone; in should_compact_retry() local
4650 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4652 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4808 struct zone *zone; in wake_all_kswapds() local
4812 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4814 if (!managed_zone(zone)) in wake_all_kswapds()
4816 if (last_pgdat != zone->zone_pgdat) { in wake_all_kswapds()
4817 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4818 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4922 struct zone *zone; in should_reclaim_retry() local
4951 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4955 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4958 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4959 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4965 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
5069 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
5081 if (!z->zone) in __alloc_pages_slowpath()
5377 struct zone *zone; in __alloc_pages_bulk() local
5429 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { in __alloc_pages_bulk()
5433 !__cpuset_zone_allowed(zone, gfp)) { in __alloc_pages_bulk()
5437 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && in __alloc_pages_bulk()
5438 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { in __alloc_pages_bulk()
5442 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; in __alloc_pages_bulk()
5443 if (zone_watermark_fast(zone, 0, mark, in __alloc_pages_bulk()
5454 if (unlikely(!zone)) in __alloc_pages_bulk()
5459 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); in __alloc_pages_bulk()
5473 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, in __alloc_pages_bulk()
5496 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); in __alloc_pages_bulk()
5497 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); in __alloc_pages_bulk()
5555 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); in __alloc_pages()
5895 struct zone *zone; in nr_free_zone_pages() local
5902 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5903 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5904 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
5927 static inline void show_node(struct zone *zone) in show_node() argument
5930 printk("Node %d ", zone_to_nid(zone)); in show_node()
5940 struct zone *zone; in si_mem_available() local
5946 for_each_zone(zone) in si_mem_available()
5947 wmark_low += low_wmark_pages(zone); in si_mem_available()
6008 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
6010 if (is_highmem(zone)) { in si_meminfo_node()
6011 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
6012 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
6096 struct zone *zone; in __show_free_areas() local
6099 for_each_populated_zone(zone) { in __show_free_areas()
6100 if (zone_idx(zone) > max_zone_idx) in __show_free_areas()
6102 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in __show_free_areas()
6106 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; in __show_free_areas()
6198 for_each_populated_zone(zone) { in __show_free_areas()
6201 if (zone_idx(zone) > max_zone_idx) in __show_free_areas()
6203 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in __show_free_areas()
6208 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; in __show_free_areas()
6210 show_node(zone); in __show_free_areas()
6233 zone->name, in __show_free_areas()
6234 K(zone_page_state(zone, NR_FREE_PAGES)), in __show_free_areas()
6235 K(zone->watermark_boost), in __show_free_areas()
6236 K(min_wmark_pages(zone)), in __show_free_areas()
6237 K(low_wmark_pages(zone)), in __show_free_areas()
6238 K(high_wmark_pages(zone)), in __show_free_areas()
6239 K(zone->nr_reserved_highatomic), in __show_free_areas()
6240 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in __show_free_areas()
6241 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in __show_free_areas()
6242 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in __show_free_areas()
6243 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in __show_free_areas()
6244 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in __show_free_areas()
6245 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in __show_free_areas()
6246 K(zone->present_pages), in __show_free_areas()
6247 K(zone_managed_pages(zone)), in __show_free_areas()
6248 K(zone_page_state(zone, NR_MLOCK)), in __show_free_areas()
6249 K(zone_page_state(zone, NR_BOUNCE)), in __show_free_areas()
6251 K(this_cpu_read(zone->per_cpu_pageset->count)), in __show_free_areas()
6252 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in __show_free_areas()
6255 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in __show_free_areas()
6259 for_each_populated_zone(zone) { in __show_free_areas()
6264 if (zone_idx(zone) > max_zone_idx) in __show_free_areas()
6266 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in __show_free_areas()
6268 show_node(zone); in __show_free_areas()
6269 printk(KERN_CONT "%s: ", zone->name); in __show_free_areas()
6271 spin_lock_irqsave(&zone->lock, flags); in __show_free_areas()
6273 struct free_area *area = &zone->free_area[order]; in __show_free_areas()
6285 spin_unlock_irqrestore(&zone->lock, flags); in __show_free_areas()
6306 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
6308 zoneref->zone = zone; in zoneref_set_zone()
6309 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
6319 struct zone *zone; in build_zonerefs_node() local
6325 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
6326 if (populated_zone(zone)) { in build_zonerefs_node()
6327 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
6449 zonerefs->zone = NULL; in build_zonelists_in_node_order()
6464 zonerefs->zone = NULL; in build_thisnode_zonelists()
6523 return zone_to_nid(z->zone); in local_memory_node()
6564 zonerefs->zone = NULL; in build_zonelists()
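
The build_zonelists*() entries above all finish by NULL-terminating an array of struct zoneref: the ordered (zone pointer, zone index) pairs that for_each_zone_zonelist*() walks when allocating with fallback. A minimal model of that structure:

    #include <stdio.h>

    struct zone_model { const char *name; };

    struct zoneref_model {
        struct zone_model *zone;
        int zone_idx;
    };

    int main(void)
    {
        struct zone_model normal = { "Normal" }, dma32 = { "DMA32" };
        struct zoneref_model zonelist[] = {
            { &normal, 1 },   /* preferred: highest suitable zone first */
            { &dma32,  0 },   /* fallback */
            { NULL,    0 },   /* terminator, as set by build_zonelists() */
        };

        for (struct zoneref_model *z = zonelist; z->zone; z++)
            printf("try zone %s (idx %d)\n", z->zone->name, z->zone_idx);
        return 0;
    }
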
6707 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6711 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6736 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, in memmap_init_range() argument
6755 if (zone == ZONE_DEVICE) { in memmap_init_range()
6771 if (overlap_memmap_init(zone, &pfn)) in memmap_init_range()
6778 __init_single_page(page, pfn, zone, nid); in memmap_init_range()
6888 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6894 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6897 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6931 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6935 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6936 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6964 int zone, int node) in init_unavailable_range() argument
6974 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
6981 node, zone_names[zone], pgcnt); in init_unavailable_range()
6984 static void __init memmap_init_zone_range(struct zone *zone, in memmap_init_zone_range() argument
6989 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
6990 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
6991 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone_range()
7018 struct zone *zone = node->node_zones + j; in memmap_init() local
7020 if (!populated_zone(zone)) in memmap_init()
7023 memmap_init_zone_range(zone, start_pfn, end_pfn, in memmap_init()
7065 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
7076 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); in zone_batchsize()
7113 static int zone_highsize(struct zone *zone, int batch, int cpu_online) in zone_highsize() argument
7126 total_pages = low_wmark_pages(zone); in zone_highsize()
7133 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; in zone_highsize()
7144 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; in zone_highsize()
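
zone_batchsize() above derives the per-CPU batch from the zone size: roughly 1/1024 of the managed pages, capped at 1 MiB worth; the kernel then rounds the result further in lines this listing does not match. A sketch of the visible arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE_MODEL 4096UL
    #define SZ_1M           (1024UL * 1024UL)

    static unsigned long batch_model(unsigned long managed_pages)
    {
        unsigned long batch = managed_pages >> 10;        /* ~0.1% of zone */
        unsigned long cap = SZ_1M / PAGE_SIZE_MODEL;      /* 256 pages */

        return batch < cap ? batch : cap;
    }

    int main(void)
    {
        printf("4 GiB zone -> batch %lu pages (pre-rounding)\n",
               batch_model((4UL << 30) / PAGE_SIZE_MODEL));
        return 0;
    }
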
7206 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, in __zone_set_pageset_high_and_batch() argument
7213 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __zone_set_pageset_high_and_batch()
7222 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) in zone_set_pageset_high_and_batch() argument
7226 new_batch = max(1, zone_batchsize(zone)); in zone_set_pageset_high_and_batch()
7227 new_high = zone_highsize(zone, new_batch, cpu_online); in zone_set_pageset_high_and_batch()
7229 if (zone->pageset_high == new_high && in zone_set_pageset_high_and_batch()
7230 zone->pageset_batch == new_batch) in zone_set_pageset_high_and_batch()
7233 zone->pageset_high = new_high; in zone_set_pageset_high_and_batch()
7234 zone->pageset_batch = new_batch; in zone_set_pageset_high_and_batch()
7236 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); in zone_set_pageset_high_and_batch()
7239 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
7245 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); in setup_zone_pageset()
7247 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); in setup_zone_pageset()
7252 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in setup_zone_pageset()
7253 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in setup_zone_pageset()
7257 zone_set_pageset_high_and_batch(zone, 0); in setup_zone_pageset()
7264 static void zone_pcp_update(struct zone *zone, int cpu_online) in zone_pcp_update() argument
7267 zone_set_pageset_high_and_batch(zone, cpu_online); in zone_pcp_update()
7278 struct zone *zone; in setup_per_cpu_pageset() local
7281 for_each_populated_zone(zone) in setup_per_cpu_pageset()
7282 setup_zone_pageset(zone); in setup_per_cpu_pageset()
7303 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
7310 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_init()
7311 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_init()
7312 zone->pageset_high = BOOT_PAGESET_HIGH; in zone_pcp_init()
7313 zone->pageset_batch = BOOT_PAGESET_BATCH; in zone_pcp_init()
7315 if (populated_zone(zone)) in zone_pcp_init()
7316 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, in zone_pcp_init()
7317 zone->present_pages, zone_batchsize(zone)); in zone_pcp_init()
7320 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
7324 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
7325 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
7330 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
7335 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
7338 zone_init_free_lists(zone); in init_currently_empty_zone()
7339 zone->initialized = 1; in init_currently_empty_zone()
7557 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
7575 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
7577 zone->zone_start_pfn = 0; in calculate_node_totalpages()
7578 zone->spanned_pages = size; in calculate_node_totalpages()
7579 zone->present_pages = real_size; in calculate_node_totalpages()
7581 zone->present_early_pages = real_size; in calculate_node_totalpages()
7614 static void __ref setup_usemap(struct zone *zone) in setup_usemap() argument
7616 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
7617 zone->spanned_pages); in setup_usemap()
7618 zone->pageblock_flags = NULL; in setup_usemap()
7620 zone->pageblock_flags = in setup_usemap()
7622 zone_to_nid(zone)); in setup_usemap()
7623 if (!zone->pageblock_flags) in setup_usemap()
7625 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
7629 static inline void setup_usemap(struct zone *zone) {} in setup_usemap() argument
7730 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
7733 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
7734 zone_set_nid(zone, nid); in zone_init_internals()
7735 zone->name = zone_names[idx]; in zone_init_internals()
7736 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
7737 spin_lock_init(&zone->lock); in zone_init_internals()
7738 zone_seqlock_init(zone); in zone_init_internals()
7739 zone_pcp_init(zone); in zone_init_internals()
7800 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
7803 size = zone->spanned_pages; in free_area_init_core()
7804 freesize = zone->present_pages; in free_area_init_core()
7841 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
7847 setup_usemap(zone); in free_area_init_core()
7848 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
8257 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
8258 if (populated_zone(zone)) { in check_for_memory()
8293 int i, nid, zone; in free_area_init() local
8307 zone = MAX_NR_ZONES - i - 1; in free_area_init()
8309 zone = i; in free_area_init()
8311 if (zone == ZONE_MOVABLE) in free_area_init()
8314 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
8315 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
8316 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
8579 struct zone *zone; in page_alloc_cpu_dead() local
8602 for_each_populated_zone(zone) in page_alloc_cpu_dead()
8603 zone_pcp_update(zone, 0); in page_alloc_cpu_dead()
8610 struct zone *zone; in page_alloc_cpu_online() local
8612 for_each_populated_zone(zone) in page_alloc_cpu_online()
8613 zone_pcp_update(zone, 1); in page_alloc_cpu_online()
8661 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
8663 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
8667 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
8668 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
8672 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
8698 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
8700 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
8704 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
8709 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
8711 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
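
setup_per_zone_lowmem_reserve() above protects each lower zone by reserving a slice sized from the managed pages of the zones above it, divided by sysctl_lowmem_reserve_ratio. Worked with illustrative numbers (256 is a common default ratio for the DMA zones):

    #include <stdio.h>

    int main(void)
    {
        /* pages managed by the zones above the one being protected */
        unsigned long upper_managed = 1000000;
        unsigned long ratio = 256;

        printf("lowmem_reserve = %lu pages\n", upper_managed / ratio);
        return 0;
    }
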
8724 struct zone *zone; in __setup_per_zone_wmarks() local
8728 for_each_zone(zone) { in __setup_per_zone_wmarks()
8729 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
8730 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
8733 for_each_zone(zone) { in __setup_per_zone_wmarks()
8736 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
8737 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
8739 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
8751 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
8753 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
8759 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
8768 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
8771 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
8772 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
8773 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
8774 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
8776 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
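
__setup_per_zone_wmarks() above builds the watermark ladder: WMARK_MIN from the zone's share of min_free_kbytes, then low/high/promo each one step above the previous, the step being the larger of min/4 and managed * watermark_scale_factor / 10000 (the mult_frac() line above). A worked sketch with illustrative numbers (watermark_scale_factor defaults to 10):

    #include <stdio.h>

    int main(void)
    {
        unsigned long min = 4000;        /* illustrative WMARK_MIN */
        unsigned long managed = 1000000;
        unsigned long scale = 10;
        unsigned long step = managed * scale / 10000;

        if (step < min / 4)
            step = min / 4;

        printf("min=%lu low=%lu high=%lu promo=%lu\n",
               min, min + step, min + 2 * step, min + 3 * step);
        return 0;
    }
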
8792 struct zone *zone; in setup_per_zone_wmarks() local
8803 for_each_zone(zone) in setup_per_zone_wmarks()
8804 zone_pcp_update(zone, 0); in setup_per_zone_wmarks()
8905 struct zone *zone; in setup_min_unmapped_ratio() local
8910 for_each_zone(zone) in setup_min_unmapped_ratio()
8911 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8933 struct zone *zone; in setup_min_slab_ratio() local
8938 for_each_zone(zone) in setup_min_slab_ratio()
8939 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8991 struct zone *zone; in percpu_pagelist_high_fraction_sysctl_handler() local
9014 for_each_populated_zone(zone) in percpu_pagelist_high_fraction_sysctl_handler()
9015 zone_set_pageset_high_and_batch(zone, 0); in percpu_pagelist_high_fraction_sysctl_handler()
9196 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
9220 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
9276 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
9310 drain_all_pages(cc.zone); in alloc_contig_range()
9401 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
9421 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
9426 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
9455 struct zone *zone; in alloc_contig_pages() local
9459 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
9461 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
9463 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
9464 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
9465 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
9473 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
9478 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
9482 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
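
alloc_contig_pages() above scans the zone in nr_pages-aligned windows under zone->lock, probing each with pfn_range_valid_contig() before attempting the allocation. A userspace model of that scan (ALIGN_UP and range_valid() are stand-ins; the kernel's ALIGN assumes a power-of-two alignment):

    #include <stdbool.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    static bool range_valid(unsigned long pfn, unsigned long nr)
    {
        (void)nr;
        return pfn >= 2048;   /* pretend earlier ranges hold pinned pages */
    }

    int main(void)
    {
        unsigned long zone_start = 1000, zone_end = 4096, nr_pages = 512;

        for (unsigned long pfn = ALIGN_UP(zone_start, nr_pages);
             pfn + nr_pages <= zone_end; pfn += nr_pages) {
            if (range_valid(pfn, nr_pages)) {
                printf("candidate range: [%lu, %lu)\n", pfn, pfn + nr_pages);
                break;
            }
        }
        return 0;
    }
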
9510 void zone_pcp_disable(struct zone *zone) in zone_pcp_disable() argument
9513 __zone_set_pageset_high_and_batch(zone, 0, 1); in zone_pcp_disable()
9514 __drain_all_pages(zone, true); in zone_pcp_disable()
9517 void zone_pcp_enable(struct zone *zone) in zone_pcp_enable() argument
9519 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); in zone_pcp_enable()
9523 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
9528 if (zone->per_cpu_pageset != &boot_pageset) { in zone_pcp_reset()
9530 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in zone_pcp_reset()
9531 drain_zonestat(zone, pzstats); in zone_pcp_reset()
9533 free_percpu(zone->per_cpu_pageset); in zone_pcp_reset()
9534 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_reset()
9535 if (zone->per_cpu_zonestats != &boot_zonestats) { in zone_pcp_reset()
9536 free_percpu(zone->per_cpu_zonestats); in zone_pcp_reset()
9537 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_reset()
9551 struct zone *zone; in __offline_isolated_pages() local
9556 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
9557 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
9582 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
9585 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
9614 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
9633 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
9637 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
9649 struct zone *zone = page_zone(page); in take_page_off_buddy() local
9655 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
9665 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
9666 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
9670 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
9677 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
9686 struct zone *zone = page_zone(page); in put_page_back_buddy() local
9692 spin_lock_irqsave(&zone->lock, flags); in put_page_back_buddy()
9695 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); in put_page_back_buddy()
9700 spin_unlock_irqrestore(&zone->lock, flags); in put_page_back_buddy()
9712 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; in has_managed_dma() local
9714 if (managed_zone(zone)) in has_managed_dma()