Lines Matching refs:zone
(cross-reference hits for the identifier "zone" in the Linux kernel's mm/page_alloc.c: each entry shows the source line number, the matching code, and the enclosing function; a trailing "argument" or "local" tag marks lines where zone is declared as a function argument or local variable)

454 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)  in page_outside_zone_boundaries()  argument
462 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
463 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
464 sp = zone->spanned_pages; in page_outside_zone_boundaries()
465 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
467 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
471 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
477 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
481 if (zone != page_zone(page)) in page_is_consistent()
489 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
491 if (page_outside_zone_boundaries(zone, page)) in bad_range()
493 if (!page_is_consistent(zone, page)) in bad_range()
499 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
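
The boundary check above samples zone->zone_start_pfn and zone->spanned_pages inside a zone_span_seqbegin()/zone_span_seqretry() loop so it reads a consistent pair even while memory hotplug resizes the zone. A minimal userspace model of that retry pattern, with a plain atomic counter standing in for the kernel seqlock (struct zone_model and pfn_outside_zone are hypothetical names, and the memory barriers a real seqlock needs are omitted):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct zone_model {
        atomic_uint span_seq;          /* writers bump to odd, update, bump back to even */
        unsigned long start_pfn;
        unsigned long spanned_pages;
    };

    static bool pfn_outside_zone(struct zone_model *z, unsigned long pfn)
    {
        unsigned int seq;
        unsigned long start, span;

        do {
            seq = atomic_load(&z->span_seq);   /* like zone_span_seqbegin() */
            start = z->start_pfn;
            span = z->spanned_pages;
            /* retry while a resize is in flight, or if one raced past us */
        } while ((seq & 1) || seq != atomic_load(&z->span_seq));

        return pfn < start || pfn >= start + span;
    }

    int main(void)
    {
        struct zone_model z = { .start_pfn = 0x1000, .spanned_pages = 0x8000 };

        atomic_init(&z.span_seq, 0);
        return pfn_outside_zone(&z, 0x9500);   /* exits 1: pfn is past 0x9000 */
    }
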
643 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
663 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
668 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
684 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
688 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
690 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
773 struct zone *zone, unsigned int order, in __free_one_page() argument
783 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
788 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
791 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
807 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
810 zone->free_area[order].nr_free--; in __free_one_page()
827 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
863 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
868 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
870 zone->free_area[order].nr_free++; in __free_one_page()
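
__free_one_page() keeps merging the freed block upward while its buddy is also free and of the same order. The arithmetic is the heart of the buddy allocator: a block's buddy at a given order is its PFN with bit 'order' flipped, and the merged parent starts at the lower of the two PFNs. A standalone sketch (find_buddy_pfn mirrors the kernel's __find_buddy_pfn(); the loop below just pretends every buddy is free):

    #include <stdio.h>

    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn ^ (1UL << order);
    }

    int main(void)
    {
        unsigned long pfn = 0x1400;                /* example PFN, suitably aligned */

        for (unsigned int order = 2; order < 5; order++) {
            unsigned long buddy = find_buddy_pfn(pfn, order);
            unsigned long merged = pfn & buddy;    /* first PFN of the parent block */

            printf("order %u: pfn %#lx buddy %#lx -> merged %#lx\n",
                   order, pfn, buddy, merged);
            pfn = merged;                          /* pretend the merge succeeded */
        }
        return 0;
    }
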
1080 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1136 spin_lock(&zone->lock); in free_pcppages_bulk()
1137 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1151 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk()
1154 spin_unlock(&zone->lock); in free_pcppages_bulk()
1157 static void free_one_page(struct zone *zone, in free_one_page() argument
1162 spin_lock(&zone->lock); in free_one_page()
1163 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1167 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
1168 spin_unlock(&zone->lock); in free_one_page()
1172 unsigned long zone, int nid) in __init_single_page() argument
1175 set_page_links(page, zone, nid, pfn); in __init_single_page()
1183 if (!is_highmem_idx(zone)) in __init_single_page()
1201 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1203 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1355 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1370 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1382 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1384 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1388 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1392 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1395 block_end_pfn, zone)) in set_zone_contiguous()
1400 zone->contiguous = true; in set_zone_contiguous()
1403 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1405 zone->contiguous = false; in clear_zone_contiguous()
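
set_zone_contiguous() walks the zone one pageblock at a time and proves every block maps to valid pages of this zone, so later pfn-range walkers can skip per-page checks. A sketch of the stride logic, assuming 512-page (order-9) pageblocks and a hypothetical block_valid() in place of __pageblock_pfn_to_page():

    #include <stdbool.h>

    #define PAGEBLOCK_NR_PAGES 512UL   /* assumption: 2MB blocks with 4KB pages */

    static bool block_valid(unsigned long spfn, unsigned long epfn)
    {
        (void)spfn; (void)epfn;
        return true;                   /* real check: pfn_valid + same zone */
    }

    static bool zone_is_contiguous(unsigned long zone_start, unsigned long zone_end)
    {
        unsigned long spfn = zone_start;

        while (spfn < zone_end) {
            /* round up to the next pageblock boundary, clamp to the zone end */
            unsigned long epfn = (spfn + PAGEBLOCK_NR_PAGES) & ~(PAGEBLOCK_NR_PAGES - 1);

            if (epfn > zone_end)
                epfn = zone_end;
            if (!block_valid(spfn, epfn))
                return false;          /* first hole ends the scan */
            spfn = epfn;
        }
        return true;
    }

    int main(void)
    {
        return zone_is_contiguous(0x1000, 0x42000) ? 0 : 1;
    }
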
1539 struct zone *zone; in deferred_init_memmap() local
1562 zone = pgdat->node_zones + zid; in deferred_init_memmap()
1563 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
1566 first_init_pfn = max(zone->zone_start_pfn, first_init_pfn); in deferred_init_memmap()
1576 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa)); in deferred_init_memmap()
1581 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa)); in deferred_init_memmap()
1587 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
1619 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
1621 int zid = zone_idx(zone); in deferred_grow_zone()
1622 int nid = zone_to_nid(zone); in deferred_grow_zone()
1632 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
1657 first_init_pfn = max(zone->zone_start_pfn, first_deferred_pfn); in deferred_grow_zone()
1666 epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa)); in deferred_grow_zone()
1701 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
1703 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
1710 struct zone *zone; in page_alloc_init_late() local
1738 for_each_populated_zone(zone) in page_alloc_init_late()
1739 set_zone_contiguous(zone); in page_alloc_init_late()
1787 static inline void expand(struct zone *zone, struct page *page, in expand() argument
1797 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
1805 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
1940 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1949 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
1957 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
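
When __rmqueue_smallest() has to take a block larger than requested, expand() splits it on the way out: each halving puts the upper half back on the free list one order lower (optionally as a guard block, per the set_page_guard() lines above). A runnable model of the split, with free lists reduced to per-order counters:

    #include <stdio.h>

    #define MAX_ORDER 11

    static unsigned long nr_free[MAX_ORDER];

    static void expand_block(unsigned int low, unsigned int high)
    {
        unsigned long size = 1UL << high;

        while (high > low) {
            high--;
            size >>= 1;
            nr_free[high]++;    /* upper half becomes a free block of order 'high' */
            printf("returned order-%u block (%lu pages) to the free list\n",
                   high, size);
        }
    }

    int main(void)
    {
        expand_block(0, 3);     /* order-0 request carved from an order-3 block */
        return 0;
    }
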
1983 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
1986 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1989 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
1998 static int move_freepages(struct zone *zone, in move_freepages() argument
2029 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2047 &zone->free_area[order].free_list[migratetype]); in move_freepages()
2055 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2068 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2070 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2073 return move_freepages(zone, start_page, end_page, migratetype, in move_freepages_block()
2129 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2156 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2195 area = &zone->free_area[current_order]; in steal_suitable_fallback()
2240 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2250 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2251 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2254 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2257 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2264 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2266 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2270 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2288 struct zone *zone; in unreserve_highatomic_pageblock() local
2293 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in unreserve_highatomic_pageblock()
2299 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2303 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2305 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2328 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2330 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2343 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2346 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2350 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2367 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) in __rmqueue_fallback() argument
2382 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2408 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2425 steal_suitable_fallback(zone, page, start_migratetype, can_steal); in __rmqueue_fallback()
2439 __rmqueue(struct zone *zone, unsigned int order, int migratetype) in __rmqueue() argument
2444 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2447 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2449 if (!page && __rmqueue_fallback(zone, order, migratetype)) in __rmqueue()
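
__rmqueue() encodes the fallback policy in one retry loop: exact migratetype first, then the CMA reserve for movable requests only, then __rmqueue_fallback() steals a block from another migratetype onto the wanted list and the loop restarts. A control-flow sketch with hypothetical stand-ins (take_smallest, take_cma, steal_fallback) for the real free-list operations:

    #include <stddef.h>

    enum mt { UNMOVABLE, MOVABLE, RECLAIMABLE };

    /* stand-ins: a real implementation manipulates zone->free_area[] */
    static void *take_smallest(int order, enum mt type) { (void)order; (void)type; return NULL; }
    static void *take_cma(int order) { (void)order; return NULL; }
    static int steal_fallback(int order, enum mt type) { (void)order; (void)type; return 0; }

    static void *rmqueue_model(int order, enum mt type)
    {
        void *page;

    retry:
        page = take_smallest(order, type);
        if (!page) {
            if (type == MOVABLE)
                page = take_cma(order);       /* CMA only backs movable requests */
            if (!page && steal_fallback(order, type))
                goto retry;                   /* stolen pages now sit on the wanted list */
        }
        return page;
    }

    int main(void)
    {
        return rmqueue_model(0, MOVABLE) ? 0 : 1;   /* stubs are empty, exits 1 */
    }
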
2462 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2468 spin_lock(&zone->lock); in rmqueue_bulk()
2470 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
2490 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2500 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2501 spin_unlock(&zone->lock); in rmqueue_bulk()
2514 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2523 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
2535 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2542 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
2546 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
2559 struct zone *zone; in drain_pages() local
2561 for_each_populated_zone(zone) { in drain_pages()
2562 drain_pages_zone(cpu, zone); in drain_pages()
2572 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
2576 if (zone) in drain_local_pages()
2577 drain_pages_zone(cpu, zone); in drain_local_pages()
2603 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
2626 if (!zone) in drain_all_pages()
2639 struct zone *z; in drain_all_pages()
2642 if (zone) { in drain_all_pages()
2643 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
2680 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
2687 if (zone_is_empty(zone)) in mark_free_pages()
2690 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
2692 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
2693 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
2702 if (page_zone(page) != zone) in mark_free_pages()
2711 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
2724 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
2742 struct zone *zone = page_zone(page); in free_unref_page_commit() local
2758 free_one_page(zone, page, pfn, 0, migratetype); in free_unref_page_commit()
2764 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
2769 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
2851 struct zone *zone; in __isolate_free_page() local
2856 zone = page_zone(page); in __isolate_free_page()
2866 watermark = min_wmark_pages(zone) + (1UL << order); in __isolate_free_page()
2867 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
2870 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2875 zone->free_area[order].nr_free--; in __isolate_free_page()
2902 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
2925 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
2933 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
2949 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
2950 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
2959 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
2961 page = __rmqueue_pcplist(zone, migratetype, pcp, list); in rmqueue_pcplist()
2964 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
2974 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
2975 struct zone *zone, unsigned int order, in rmqueue() argument
2983 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
2993 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
2998 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3003 page = __rmqueue(zone, order, migratetype); in rmqueue()
3005 spin_unlock(&zone->lock); in rmqueue()
3008 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3012 zone_statistics(preferred_zone, zone); in rmqueue()
3016 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
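
rmqueue() splits fast and slow paths: order-0 allocations come from a per-CPU page list that is refilled in batch-sized chunks via rmqueue_bulk(), so zone->lock is taken once per batch instead of once per page, while higher orders always take the lock. A single-threaded model of the refill accounting (pcp_model and refill_from_buddy are hypothetical; the real code moves actual struct pages and runs with interrupts disabled):

    #include <stdio.h>

    #define BATCH 31                   /* a typical pcp->batch value */

    struct pcp_model {
        int count;                     /* pages currently cached on this CPU */
    };

    static int refill_from_buddy(int batch)
    {
        return batch;                  /* stand-in for rmqueue_bulk() */
    }

    static void alloc_order0(struct pcp_model *pcp)
    {
        if (pcp->count == 0)           /* one zone->lock round-trip per BATCH pages */
            pcp->count += refill_from_buddy(BATCH);
        pcp->count--;                  /* hand out one cached page */
    }

    int main(void)
    {
        struct pcp_model pcp = { 0 };

        for (int i = 0; i < 3; i++)
            alloc_order0(&pcp);
        printf("cached after 3 allocations: %d\n", pcp.count);   /* 31 - 3 = 28 */
        return 0;
    }
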
3108 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3187 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3194 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3220 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
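
All the watermark helpers reduce to one comparison: after charging the request itself, the zone's free pages must stay above the selected watermark plus the lowmem_reserve that protects this zone from allocations which could have been satisfied from a higher zone. A sketch of that core test (the real __zone_watermark_ok() also scans free_area[] for a usable block at higher orders and applies the ALLOC_HIGH/ALLOC_HARDER adjustments):

    #include <stdbool.h>
    #include <stdio.h>

    static bool watermark_ok(long free_pages, unsigned int order,
                             unsigned long mark, long lowmem_reserve)
    {
        free_pages -= (1L << order) - 1;       /* the request counts against the check */
        return free_pages > (long)mark + lowmem_reserve;
    }

    int main(void)
    {
        /* 10000 free pages, order-3 request, mark 4096, reserve 2048: passes */
        printf("%d\n", watermark_ok(10000, 3, 4096, 2048));
        return 0;
    }
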
3233 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3235 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3239 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3254 struct zone *zone; in get_page_from_freelist() local
3261 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in get_page_from_freelist()
3268 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3290 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3293 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3294 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3299 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; in get_page_from_freelist()
3300 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3310 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3320 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3323 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3333 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3342 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3352 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3359 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3574 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
3576 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
3577 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
3678 struct zone *zone; in should_compact_retry() local
3690 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_compact_retry()
3692 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
3816 struct zone *zone; in wake_all_kswapds() local
3820 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx, in wake_all_kswapds()
3822 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
3823 wakeup_kswapd(zone, gfp_mask, order, high_zoneidx); in wake_all_kswapds()
3824 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
3923 struct zone *zone; in should_reclaim_retry() local
3951 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in should_reclaim_retry()
3955 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
3958 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
3959 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
3965 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
3979 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4098 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
4652 struct zone *zone; in nr_free_zone_pages() local
4659 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
4660 unsigned long size = zone->managed_pages; in nr_free_zone_pages()
4661 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
4692 static inline void show_node(struct zone *zone) in show_node() argument
4695 printk("Node %d ", zone_to_nid(zone)); in show_node()
4704 struct zone *zone; in si_mem_available() local
4710 for_each_zone(zone) in si_mem_available()
4711 wmark_low += zone->watermark[WMARK_LOW]; in si_mem_available()
4778 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
4780 if (is_highmem(zone)) { in si_meminfo_node()
4781 managed_highpages += zone->managed_pages; in si_meminfo_node()
4782 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
4857 struct zone *zone; in show_free_areas() local
4860 for_each_populated_zone(zone) { in show_free_areas()
4861 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
4865 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
4943 for_each_populated_zone(zone) { in show_free_areas()
4946 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
4951 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
4953 show_node(zone); in show_free_areas()
4976 zone->name, in show_free_areas()
4977 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
4978 K(min_wmark_pages(zone)), in show_free_areas()
4979 K(low_wmark_pages(zone)), in show_free_areas()
4980 K(high_wmark_pages(zone)), in show_free_areas()
4981 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
4982 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
4983 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
4984 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
4985 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
4986 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
4987 K(zone->present_pages), in show_free_areas()
4988 K(zone->managed_pages), in show_free_areas()
4989 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
4990 zone_page_state(zone, NR_KERNEL_STACK_KB), in show_free_areas()
4991 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
4992 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
4994 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
4995 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
4998 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5002 for_each_populated_zone(zone) { in show_free_areas()
5007 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5009 show_node(zone); in show_free_areas()
5010 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5012 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5014 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5026 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5043 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5045 zoneref->zone = zone; in zoneref_set_zone()
5046 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5056 struct zone *zone; in build_zonerefs_node() local
5062 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5063 if (managed_zone(zone)) { in build_zonerefs_node()
5064 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5207 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5222 zonerefs->zone = NULL; in build_thisnode_zonelists()
5280 return zone_to_nid(z->zone); in local_memory_node()
5321 zonerefs->zone = NULL; in build_zonelists()
5457 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
5501 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in memmap_init_zone()
5519 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
5545 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
5549 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
5550 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
5555 #define memmap_init(size, nid, zone, start_pfn) \ argument
5556 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
5559 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
5568 batch = zone->managed_pages / 1024; in zone_batchsize()
5674 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
5679 (zone->managed_pages / in pageset_set_high_and_batch()
5682 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
5685 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
5687 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
5690 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
5693 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
5696 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
5698 zone_pageset_init(zone, cpu); in setup_zone_pageset()
5708 struct zone *zone; in setup_per_cpu_pageset() local
5710 for_each_populated_zone(zone) in setup_per_cpu_pageset()
5711 setup_zone_pageset(zone); in setup_per_cpu_pageset()
5718 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
5725 zone->pageset = &boot_pageset; in zone_pcp_init()
5727 if (populated_zone(zone)) in zone_pcp_init()
5729 zone->name, zone->present_pages, in zone_pcp_init()
5730 zone_batchsize(zone)); in zone_pcp_init()
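
zone_batchsize() sizes the per-CPU batch at roughly 0.1% of the zone (the managed_pages / 1024 line above), caps it at 512KB worth of pages, quarters it, and finally rounds it to one below a power of two so batch boundaries don't line up with power-of-two free-list alignment. A model of that arithmetic from the long-standing implementation (4KB pages assumed; treat the constants as illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long rounddown_pow_of_two(unsigned long n)
    {
        while (n & (n - 1))
            n &= n - 1;                /* drop low set bits until one remains */
        return n;
    }

    static int zone_batchsize_model(unsigned long managed_pages)
    {
        unsigned long batch = managed_pages / 1024;

        if (batch * PAGE_SIZE > 512 * 1024)
            batch = (512 * 1024) / PAGE_SIZE;
        batch /= 4;                    /* keep pcp lists a small slice of the zone */
        if (batch < 1)
            batch = 1;
        return rounddown_pow_of_two(batch + batch / 2) - 1;
    }

    int main(void)
    {
        /* a 4GB zone (2^20 pages) yields the familiar batch of 31 */
        printf("batch: %d\n", zone_batchsize_model(1UL << 20));
        return 0;
    }
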
5733 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
5737 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
5739 pgdat->nr_zones = zone_idx(zone) + 1; in init_currently_empty_zone()
5741 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
5746 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
5749 zone_init_free_lists(zone); in init_currently_empty_zone()
5750 zone->initialized = 1; in init_currently_empty_zone()
6036 unsigned int zone; in zone_spanned_pages_in_node() local
6039 for (zone = 0; zone < zone_type; zone++) in zone_spanned_pages_in_node()
6040 *zone_start_pfn += zones_size[zone]; in zone_spanned_pages_in_node()
6071 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6085 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6087 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6088 zone->spanned_pages = size; in calculate_node_totalpages()
6089 zone->present_pages = real_size; in calculate_node_totalpages()
6123 struct zone *zone, in setup_usemap() argument
6128 zone->pageblock_flags = NULL; in setup_usemap()
6130 zone->pageblock_flags = in setup_usemap()
6135 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
6231 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
6234 zone->managed_pages = remaining_pages; in zone_init_internals()
6235 zone_set_nid(zone, nid); in zone_init_internals()
6236 zone->name = zone_names[idx]; in zone_init_internals()
6237 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
6238 spin_lock_init(&zone->lock); in zone_init_internals()
6239 zone_seqlock_init(zone); in zone_init_internals()
6240 zone_pcp_init(zone); in zone_init_internals()
6280 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
6282 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
6284 size = zone->spanned_pages; in free_area_init_core()
6285 freesize = zone->present_pages; in free_area_init_core()
6324 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
6330 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
6331 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
6810 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
6811 if (populated_zone(zone)) { in check_for_memory()
7150 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
7155 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
7156 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
7160 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
7162 if (max > zone->managed_pages) in calculate_totalreserve_pages()
7163 max = zone->managed_pages; in calculate_totalreserve_pages()
7186 struct zone *zone = pgdat->node_zones + j; in setup_per_zone_lowmem_reserve() local
7187 unsigned long managed_pages = zone->managed_pages; in setup_per_zone_lowmem_reserve()
7189 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
7193 struct zone *lower_zone; in setup_per_zone_lowmem_reserve()
7218 struct zone *zone; in __setup_per_zone_wmarks() local
7222 for_each_zone(zone) { in __setup_per_zone_wmarks()
7223 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
7224 lowmem_pages += zone->managed_pages; in __setup_per_zone_wmarks()
7227 for_each_zone(zone) { in __setup_per_zone_wmarks()
7230 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
7231 tmp = (u64)pages_min * zone->managed_pages; in __setup_per_zone_wmarks()
7233 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
7245 min_pages = zone->managed_pages / 1024; in __setup_per_zone_wmarks()
7247 zone->watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
7253 zone->watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
7262 mult_frac(zone->managed_pages, in __setup_per_zone_wmarks()
7265 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
7266 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
7268 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
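
__setup_per_zone_wmarks() splits min_free_kbytes (as pages) across zones in proportion to their managed pages, then spaces WMARK_LOW and WMARK_HIGH above WMARK_MIN by the larger of min/4 and watermark_scale_factor ten-thousandths of the zone, as the tmp/mult_frac lines above show. A sketch of the non-highmem arithmetic (set_wmarks is a hypothetical name; the kernel writes zone->watermark[] under zone->lock):

    #include <stdio.h>

    static void set_wmarks(unsigned long pages_min_total, unsigned long zone_managed,
                           unsigned long lowmem_total, int watermark_scale_factor,
                           unsigned long wmark[3])
    {
        unsigned long min = (unsigned long)((unsigned long long)pages_min_total *
                                            zone_managed / lowmem_total);
        unsigned long gap = min / 4;
        unsigned long scaled = zone_managed * (unsigned long)watermark_scale_factor / 10000;

        if (scaled > gap)
            gap = scaled;              /* max(min >> 2, scale-factor share of the zone) */

        wmark[0] = min;                /* WMARK_MIN  */
        wmark[1] = min + gap;          /* WMARK_LOW  */
        wmark[2] = min + 2 * gap;      /* WMARK_HIGH */
    }

    int main(void)
    {
        unsigned long wmark[3];

        /* example: 16384 min pages spread over a single 4GB (2^20-page) zone,
           with the default watermark_scale_factor of 10 */
        set_wmarks(16384, 1UL << 20, 1UL << 20, 10, wmark);
        printf("min %lu low %lu high %lu\n", wmark[0], wmark[1], wmark[2]);
        return 0;
    }
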
7386 struct zone *zone; in setup_min_unmapped_ratio() local
7391 for_each_zone(zone) in setup_min_unmapped_ratio()
7392 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * in setup_min_unmapped_ratio()
7414 struct zone *zone; in setup_min_slab_ratio() local
7419 for_each_zone(zone) in setup_min_slab_ratio()
7420 zone->zone_pgdat->min_slab_pages += (zone->managed_pages * in setup_min_slab_ratio()
7464 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
7487 for_each_populated_zone(zone) { in percpu_pagelist_fraction_sysctl_handler()
7491 pageset_set_high_and_batch(zone, in percpu_pagelist_fraction_sysctl_handler()
7492 per_cpu_ptr(zone->pageset, cpu)); in percpu_pagelist_fraction_sysctl_handler()
7658 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, in has_unmovable_pages() argument
7749 WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); in has_unmovable_pages()
7798 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
7843 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
7914 drain_all_pages(cc.zone); in alloc_contig_range()
7985 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
7990 pageset_set_high_and_batch(zone, in zone_pcp_update()
7991 per_cpu_ptr(zone->pageset, cpu)); in zone_pcp_update()
7996 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
8004 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
8006 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
8007 drain_zonestat(zone, pset); in zone_pcp_reset()
8009 free_percpu(zone->pageset); in zone_pcp_reset()
8010 zone->pageset = &boot_pageset; in zone_pcp_reset()
8024 struct zone *zone; in __offline_isolated_pages() local
8035 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
8036 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
8063 zone->free_area[order].nr_free--; in __offline_isolated_pages()
8068 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
8074 struct zone *zone = page_zone(page); in is_free_buddy_page() local
8079 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
8086 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
8099 struct zone *zone = page_zone(page); in set_hwpoison_free_buddy_page() local
8105 spin_lock_irqsave(&zone->lock, flags); in set_hwpoison_free_buddy_page()
8115 spin_unlock_irqrestore(&zone->lock, flags); in set_hwpoison_free_buddy_page()