Lines Matching +full:a +full:- +full:z

1 /* SPDX-License-Identifier: GPL-2.0 */
18 #include <linux/pageblock-flags.h>
19 #include <linux/page-flags-layout.h>
22 #include <linux/page-flags.h>
26 /* Free memory management - zoned buddy allocator. */
32 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
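To make the arithmetic concrete, here is a minimal standalone sketch, assuming the long-standing default MAX_ORDER of 11 and 4 KiB pages (both values are configuration- and architecture-dependent):

#include <stdio.h>

/* Assumed values: MAX_ORDER defaults to 11 in many configurations and
 * PAGE_SIZE is 4 KiB on most architectures; both vary with the config. */
#define MAX_ORDER               11
#define MAX_ORDER_NR_PAGES      (1 << (MAX_ORDER - 1))
#define PAGE_SIZE               4096UL

int main(void)
{
        /* The buddy allocator serves orders 0 .. MAX_ORDER - 1, so the
         * largest block is MAX_ORDER_NR_PAGES contiguous pages. */
        printf("largest buddy block: %d pages (%lu KiB)\n",
               MAX_ORDER_NR_PAGES, MAX_ORDER_NR_PAGES * PAGE_SIZE >> 10);
        return 0;
}

With these assumptions the largest block is 1024 pages, i.e. 4 MiB.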
55 * The way to use it is to change migratetype of a range of
58 * is that a range of pageblocks must be aligned to
60 * a single pageblock.
92 #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
105 return list_first_entry_or_null(&area->free_list[migratetype], in get_page_from_free_area()
111 return list_empty(&area->free_list[migratetype]); in free_area_empty()
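The two helpers above are how callers probe a free_area. The sketch below (the name find_smallest_free is hypothetical) is loosely modeled on the kernel's __rmqueue_smallest() and shows the intended usage pattern; the real function also removes the page from the list and splits larger blocks:

/* Scan free lists from the requested order upward until a block of the
 * wanted migratetype is found; NULL means the zone has nothing suitable. */
static struct page *find_smallest_free(struct zone *zone, unsigned int order,
                                       int migratetype)
{
        unsigned int current_order;

        for (current_order = order; current_order < MAX_ORDER; current_order++) {
                struct free_area *area = &zone->free_area[current_order];
                struct page *page = get_page_from_free_area(area, migratetype);

                if (page)
                        return page;
        }
        return NULL;
}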
117 * Add a wild amount of padding here to ensure data fall into separate
119 * consumption is not a concern here.
202 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
241 * Global and per-node slab counters track slab pages. in vmstat_item_in_bytes()
245 * Per-memcg and per-lruvec counters track memory, consumed in vmstat_item_in_bytes()
247 * byte-precise. in vmstat_item_in_bytes()
293 * backed by a congested BDI
302 * These track the cost of reclaiming one LRU - file or anon -
308 /* Non-resident age, driven by LRU movement */
352 #define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)
354 #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) argument
355 #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) argument
356 #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) argument
357 #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) argument
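As an illustration of how these macros are consumed, here is a stripped-down sketch of a watermark comparison (the helper name is hypothetical); the real check in __zone_watermark_ok() additionally accounts for ALLOC_* flags, unusable free pages and per-order free lists:

/* Does the zone sit above its low watermark once the lowmem reserve
 * for the requested zone index is honoured? Sketch only. */
static bool above_low_watermark(struct zone *z, int highest_zoneidx,
                                unsigned long free_pages)
{
        unsigned long mark = low_wmark_pages(z);

        return free_pages > mark + z->lowmem_reserve[highest_zoneidx];
}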
369 /* Lists of pages, one per migrate type stored on the pcp-lists */
381 * on demand. Use a large type to avoid the overhead of
401 * DMA addressing constraints. This distinction is important as a 32bit
402 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
420 * A memory area that is only addressable by the kernel through
433 * likely to succeed, and to locally limit unmovable allocations - e.g.,
436 * 1. Pinned pages: (long-term) pinning of movable pages might
438 * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
443 * to a different zone. When migration fails - pinning fails.
455 * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
457 * some cases (virtio-mem), such pages can be skipped during
461 * of memory unplug in virtio-mem).
464 * on different platforms may end up in a movable zone. ZERO_PAGE(0)
466 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
469 * self-stored in the range, but they are treated as movable when
491 /* Read-mostly fields */
525 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
537 * spanned_pages = zone_end_pfn - zone_start_pfn;
541 * present_pages = spanned_pages - absent_pages(pages in holes);
550 * managed_pages = present_pages - reserved_pages;
557 * (present_pages - managed_pages). And managed_pages should be used
564 * It is a seqlock because it has to be read outside of zone->lock,
568 * The span_seq lock is declared along with zone->lock because it is
569 * frequently read in proximity to zone->lock. It's good to
570 * give them a chance of being in the same cacheline.
574 * present_pages should use get_online_mems() to get a stable value.
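A sketch of the read side this describes, using the zone_span_seqbegin()/zone_span_seqretry() wrappers defined elsewhere in this header; they are only meaningful with CONFIG_MEMORY_HOTPLUG, where a zone's span can actually change, and the function name here is hypothetical:

/* Retry until a consistent snapshot of the span is read; writers hold
 * zone->span_seqlock while resizing the zone. */
static unsigned long snapshot_spanned_pages(struct zone *zone)
{
        unsigned long seq, spanned;

        do {
                seq = zone_span_seqbegin(zone);
                spanned = zone->spanned_pages;
        } while (zone_span_seqretry(zone, seq));

        return spanned;
}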
592 * of pageblock. Protected by zone->lock.
604 /* Write-intensive fields used from the page allocator */
616 /* Write-intensive fields used by compaction and vmstats. */
621 * when reading the number of free pages to avoid per-cpu counter
680 return (unsigned long)atomic_long_read(&zone->managed_pages); in zone_managed_pages()
686 return zone->cma_pages; in zone_cma_pages()
694 return zone->zone_start_pfn + zone->spanned_pages; in zone_end_pfn()
699 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); in zone_spans_pfn()
704 return zone->initialized; in zone_is_initialized()
709 return zone->spanned_pages == 0; in zone_is_empty()
713 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
722 start_pfn + nr_pages <= zone->zone_start_pfn) in zone_intersects()
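Taken together, the accessors above support simple span walks, as in this illustrative sketch (the helper is hypothetical, and real code would also check pfn_valid() before touching the memory map):

/* Count how many pfns inside the zone's span the zone actually claims. */
static unsigned long count_spanned_pfns(struct zone *zone)
{
        unsigned long pfn, nr = 0;

        if (zone_is_empty(zone))
                return 0;

        for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++)
                if (zone_spans_pfn(zone, pfn))
                        nr++;

        return nr;
}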
730 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
735 /* Maximum number of zones on a zonelist */
743 * restrict the allocations to a single node for __GFP_THISNODE.
751 * This struct contains information about a zone in a zonelist. It is stored
756 int zone_idx; /* zone_idx(zoneref->zone) */
760 * One allocation request operates on a zonelist. A zonelist
761 * is a list of zones; the first one is the 'goal' of the
767 * a struct zoneref are
769 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
770 * zonelist_zone_idx() - Return the index of the zone for an entry
771 * zonelist_node_idx() - Return the index of the node for an entry
793 * On NUMA machines, each NUMA node would have a pg_data_t to describe
794 * its memory layout. On UMA machines there is a single pglist_data which
797 * Memory statistics and page replacement data structures are maintained on a
798 * per-zone basis.
826 * Also synchronizes pgdat->first_deferred_pfn during deferred page
833 * Nests above zone->lock and zone->span_seqlock
859 * This is a per-node reserve of pages that are not available
872 /* Write-intensive fields used by page reclaim */
900 /* Per-node vmstats */
905 #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
906 #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
908 #define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
910 #define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
914 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
919 return pgdat->node_start_pfn + pgdat->node_spanned_pages; in pgdat_end_pfn()
924 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; in pgdat_is_empty()
932 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
935 bool zone_watermark_ok(struct zone *z, unsigned int order,
938 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
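A hedged example of calling the middle variant, with the mark taken from the watermark macros shown earlier; the order and the alloc_flags of 0 are illustrative:

/* Sketch: skip this zone if an order-2 allocation would leave it
 * below its minimum watermark. */
if (!zone_watermark_ok(zone, 2, min_wmark_pages(zone), highest_zoneidx, 0))
        ;       /* fall back to the next zone in the zonelist */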
957 return lruvec->pgdat; in lruvec_pgdat()
972 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
987 * Returns true if a zone has pages managed by the buddy allocator.
997 /* Returns true if a zone has memory */
1000 return zone->present_pages; in populated_zone()
1006 return zone->node; in zone_to_nid()
1011 zone->node = nid; in zone_set_nid()
1035 * is_highmem - helper function to quickly check if a struct zone is a
1037 * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
1039 * Return: 1 for a highmem zone, 0 otherwise
1092 * for_each_online_pgdat - helper macro to iterate over all online nodes
1093 * @pgdat: pointer to a pg_data_t variable
1100 * for_each_zone - helper macro to iterate over all memory zones
1107 for (zone = (first_online_pgdat())->node_zones; \
1112 for (zone = (first_online_pgdat())->node_zones; \
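Example usage of the node iterator together with the pgdat accessors shown earlier, as a sketch:

/* Walk all online nodes and report the pfn range each one spans. */
pg_data_t *pgdat;

for_each_online_pgdat(pgdat) {
        if (pgdat_is_empty(pgdat))
                continue;
        pr_info("node %d: pfn [%lu, %lu)\n", pgdat->node_id,
                pgdat->node_start_pfn, pgdat_end_pfn(pgdat));
}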
1121 return zoneref->zone; in zonelist_zone()
1126 return zoneref->zone_idx; in zonelist_zone_idx()
1131 return zone_to_nid(zoneref->zone); in zonelist_node_idx()
1134 struct zoneref *__next_zones_zonelist(struct zoneref *z,
1139 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
1140 * @z: The cursor used as a starting point for the search
1144 * This function returns the next zone at or below a given zone index that is
1145 * within the allowed nodemask using a cursor as the starting point for the
1146 * search. The zoneref returned is a cursor that represents the current zone
1151 * nodemask using a cursor within a zonelist as a starting point
1153 static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, in next_zones_zonelist() argument
1157 if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx)) in next_zones_zonelist()
1158 return z; in next_zones_zonelist()
1159 return __next_zones_zonelist(z, highest_zoneidx, nodes); in next_zones_zonelist()
1163 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1164 * @zonelist: The zonelist to search for a suitable zone
1168 * This function returns the first zone at or below a given zone index that is
1169 * within the allowed nodemask. The zoneref returned is a cursor that can be
1173 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1183 return next_zones_zonelist(zonelist->_zonerefs, in first_zones_zonelist()
1188 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1190 * @z: The current pointer within zonelist->_zonerefs being iterated
1195 * This iterator iterates through all zones at or below a given zone index and
1196 * within a given nodemask
1198 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ argument
1199 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1201 z = next_zones_zonelist(++z, highidx, nodemask), \
1202 zone = zonelist_zone(z))
1204 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ argument
1205 for (zone = z->zone; \
1207 z = next_zones_zonelist(++z, highidx, nodemask), \
1208 zone = zonelist_zone(z))
1212 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1214 * @z: The current pointer within zonelist->zones being iterated
1218 * This iterator iterates through all zones at or below a given zone index.
1220 #define for_each_zone_zonelist(zone, z, zlist, highidx) \ argument
1221 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
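Putting the pieces together, a minimal sketch of an allocator-style walk; node_zonelist() and numa_node_id() come from other headers, and ZONE_NORMAL as the highest index is only an example:

/* Visit every populated zone at or below ZONE_NORMAL on this node. */
struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
struct zoneref *z;
struct zone *zone;

for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL) {
        if (!populated_zone(zone))
                continue;
        /* candidate zone for the allocation ... */
}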
1238 #define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
1243 #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
1246 ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
1248 #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
1261 #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1267 #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
1269 #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
1274 #define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
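The section and subsection arithmetic is easiest to check with concrete numbers. A standalone sketch assuming the common x86-64 values SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12 and SUBSECTION_SHIFT = 21 (all architecture-dependent):

#include <stdio.h>

#define SECTION_SIZE_BITS       27      /* assumed: 128 MiB sections */
#define PAGE_SHIFT              12      /* assumed: 4 KiB pages */
#define SUBSECTION_SHIFT        21      /* assumed: 2 MiB subsections */

#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PFN_SUBSECTION_SHIFT    (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION    (1UL << PFN_SUBSECTION_SHIFT)
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))

int main(void)
{
        printf("pages/section: %lu, pages/subsection: %lu, subsections/section: %lu\n",
               PAGES_PER_SECTION, PAGES_PER_SUBSECTION, SUBSECTIONS_PER_SECTION);
        return 0;       /* prints 32768, 512 and 64 with the values above */
}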
1294 * This is, logically, a pointer to an array of struct
1302 * Making it a UL at least makes someone do a cast
1317 * WARNING: mem_section must be a power-of-2 in size for the
1330 #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
1340 return ms->usage->pageblock_flags; in section_to_usemap()
1357 * a little bit of information. The pointer is calculated
1358 * as mem_map - section_nr_to_pfn(pnum). The result is
1360 * 1. All mem_map arrays are page-aligned.
1362 * lowest bits. PFN_SECTION_SHIFT is arch-specific
1363 * (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
1374 #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
1379 unsigned long map = section->section_mem_map; in __section_mem_map_addr()
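Because the stored value is mem_map minus section_nr_to_pfn(pnum), converting a pfn to its struct page reduces to a single addition once the flag bits are masked off, as in this sketch of the sparsemem __pfn_to_page() logic (the wrapper name is hypothetical):

/* __pfn_to_section() locates the mem_section for a pfn, and
 * __section_mem_map_addr() strips the flag bits from section_mem_map. */
static struct page *sparse_pfn_to_page(unsigned long pfn)
{
        struct mem_section *ms = __pfn_to_section(pfn);

        return __section_mem_map_addr(ms) + pfn;
}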
1386 return (section && (section->section_mem_map & SECTION_MARKED_PRESENT)); in present_section()
1396 return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); in valid_section()
1401 return (section && (section->section_mem_map & SECTION_IS_EARLY)); in early_section()
1411 return (section && (section->section_mem_map & SECTION_IS_ONLINE)); in online_section()
1418 return section && ((section->section_mem_map & flags) == flags); in online_device_section()
1448 return test_bit(idx, ms->usage->subsection_map); in pfn_section_valid()
1459 * pfn_valid - check if there is a valid memory map entry for a PFN
1462 * Check if there is a valid memory map entry aka struct page for the @pfn.
1465 * represent a hole or an unusable page frame.
1477 * match a valid pfn. in pfn_valid()
1489 * the entire section-sized span. in pfn_valid()
1509 return -1; in next_present_section_nr()
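A closing sketch of the guard this implies for memory-map walkers: only pfns that pass pfn_valid() may be handed to pfn_to_page() (the walker name is hypothetical):

/* Walk [start_pfn, end_pfn) and skip holes in the memory map. */
static void walk_pfn_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                /* ... inspect page ... */
                (void)page;
        }
}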