Lines Matching full:zone
118 * cachelines. There are very few zone structures in the machine, so space
135 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
439 * faulted, they come from the right zone right away. However, it is
443 * to a different zone. When migration fails - pinning fails.
464 * on different platforms may end up in a movable zone. ZERO_PAGE(0)
467 * memory to the MOVABLE zone, the vmemmap pages are also placed in
468 * such zone. Such pages cannot be really moved around as they are
490 struct zone {
493 /* zone watermarks, access with *_wmark_pages(zone) macros */
502 * wasting several GB of ram we must reserve some of the lower zone
535 * spanned_pages is the total pages spanned by the zone, including
539 * present_pages is physical pages existing within the zone, which
543 * present_early_pages is present pages existing within the zone
564 * It is a seqlock because it has to be read outside of zone->lock,
568 * The span_seq lock is declared along with zone->lock because it is
569 * frequently read in proximity to zone->lock. It's good to
592 * of pageblock. Protected by zone->lock.
610 /* zone flags, see below */
655 /* Zone statistics */
672 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
675 ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
678 static inline unsigned long zone_managed_pages(struct zone *zone)
680 return (unsigned long)atomic_long_read(&zone->managed_pages);
683 static inline unsigned long zone_cma_pages(struct zone *zone)
686 return zone->cma_pages;
692 static inline unsigned long zone_end_pfn(const struct zone *zone)
694 return zone->zone_start_pfn + zone->spanned_pages;
697 static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
699 return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
702 static inline bool zone_is_initialized(struct zone *zone)
704 return zone->initialized;
707 static inline bool zone_is_empty(struct zone *zone)
709 return zone->spanned_pages == 0;
714 * intersection with the given zone
716 static inline bool zone_intersects(struct zone *zone,
719 if (zone_is_empty(zone))
721 if (start_pfn >= zone_end_pfn(zone) ||
722 start_pfn + nr_pages <= zone->zone_start_pfn)
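
Taken together, zone_end_pfn(), zone_spans_pfn() and zone_intersects() answer span questions about a zone. A minimal sketch of how a caller might combine them; the helper name pfn_range_within_zone() is hypothetical and not part of mmzone.h:

#include <linux/mmzone.h>

/*
 * Hypothetical helper: true if the whole PFN range [start_pfn,
 * start_pfn + nr_pages) lies inside @zone's spanned range. A spanned
 * range may still contain holes, so this says nothing about whether
 * every page in the range is actually present.
 */
static bool pfn_range_within_zone(struct zone *zone, unsigned long start_pfn,
				  unsigned long nr_pages)
{
	if (!nr_pages || !zone_intersects(zone, start_pfn, nr_pages))
		return false;

	return zone_spans_pfn(zone, start_pfn) &&
	       zone_spans_pfn(zone, start_pfn + nr_pages - 1);
}
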
751 * This struct contains information about a zone in a zonelist. It is stored
755 struct zone *zone; /* Pointer to actual zone */
756 int zone_idx; /* zone_idx(zoneref->zone) */
765 * To speed the reading of the zonelist, the zonerefs contain the zone index
769 * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
770 * zonelist_zone_idx() - Return the index of the zone for an entry
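
Because each zoneref caches the zone index next to the zone pointer, a walker can filter by zone type without dereferencing struct zone itself. A hedged sketch of walking a node's fallback zonelist directly; the _zonerefs layout, ZONELIST_FALLBACK and NODE_DATA() usage are assumptions about the kernel configuration rather than something shown in this listing:

#include <linux/mmzone.h>

/*
 * Sketch: count entries in node @nid's fallback zonelist whose cached
 * zone index does not exceed @highest_zoneidx. The zonelist is
 * terminated by a zoneref whose zone pointer is NULL.
 */
static unsigned int count_eligible_zonerefs(int nid,
					    enum zone_type highest_zoneidx)
{
	struct zonelist *zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	struct zoneref *z;
	unsigned int nr = 0;

	for (z = zonelist->_zonerefs; zonelist_zone(z); z++)
		if (zonelist_zone_idx(z) <= highest_zoneidx)
			nr++;

	return nr;
}
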
798 * per-zone basis.
806 struct zone node_zones[MAX_NR_ZONES];
833 * Nests above zone->lock and zone->span_seqlock
930 void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
932 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
935 bool zone_watermark_ok(struct zone *z, unsigned int order,
938 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
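
zone_watermark_ok() is the usual gate before treating a zone as an allocation target, with zone_watermark_ok_safe() recomputing the free count more carefully and __zone_watermark_ok() taking an explicit free-page count. A minimal sketch of the common pattern; the trailing highest_zoneidx and alloc_flags parameters and low_wmark_pages() are assumed from current kernels rather than visible in the truncated prototypes above:

#include <linux/mmzone.h>

/*
 * Sketch: can @zone satisfy an order-@order request without dropping
 * below its low watermark? Passing 0 for alloc_flags means no special
 * allocation context.
 */
static bool zone_allocatable(struct zone *zone, unsigned int order,
			     int highest_zoneidx)
{
	unsigned long mark = low_wmark_pages(zone);

	return zone_watermark_ok(zone, order, mark, highest_zoneidx, 0);
}
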
949 extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
970 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
972 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
975 static inline bool zone_is_zone_device(struct zone *zone)
977 return zone_idx(zone) == ZONE_DEVICE;
980 static inline bool zone_is_zone_device(struct zone *zone)
987 * Returns true if a zone has pages managed by the buddy allocator.
989 * populated_zone(). If the whole zone is reserved then we can easily
992 static inline bool managed_zone(struct zone *zone)
994 return zone_managed_pages(zone);
997 /* Returns true if a zone has memory */
998 static inline bool populated_zone(struct zone *zone)
1000 return zone->present_pages;
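
present_pages counts physical pages that exist within the zone, while managed_pages counts only the pages handed to the buddy allocator, so a zone can be populated yet have nothing for managed_zone() to report (for example, when it is entirely reserved). A small sketch of the distinction as a reclaim-style filter; zone_worth_scanning() is a hypothetical name:

#include <linux/mmzone.h>

/*
 * Sketch: a populated zone has present physical pages; a managed zone
 * additionally has pages under buddy allocator control. Walkers that
 * feed the allocator or reclaim generally want the latter.
 */
static bool zone_worth_scanning(struct zone *zone)
{
	if (!populated_zone(zone))
		return false;		/* no physical memory at all */

	return managed_zone(zone);	/* skip fully reserved zones */
}
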
1004 static inline int zone_to_nid(struct zone *zone)
1006 return zone->node;
1009 static inline void zone_set_nid(struct zone *zone, int nid)
1011 zone->node = nid;
1014 static inline int zone_to_nid(struct zone *zone)
1019 static inline void zone_set_nid(struct zone *zone, int nid) {}
1035 * is_highmem - helper function to quickly check if a struct zone is a
1036 * highmem zone or not. This is an attempt to keep references
1038 * @zone: pointer to struct zone variable
1039 * Return: 1 for a highmem zone, 0 otherwise
1041 static inline int is_highmem(struct zone *zone)
1044 return is_highmem_idx(zone_idx(zone));
1050 /* These two functions are used to setup the per zone pages min values */
1089 extern struct zone *next_zone(struct zone *zone);
1101 * @zone: pointer to struct zone variable
1103 * The user only needs to declare the zone variable, for_each_zone
1106 #define for_each_zone(zone) \
1107 for (zone = (first_online_pgdat())->node_zones; \
1108 zone; \
1109 zone = next_zone(zone))
1111 #define for_each_populated_zone(zone) \
1112 for (zone = (first_online_pgdat())->node_zones; \
1113 zone; \
1114 zone = next_zone(zone)) \
1115 if (!populated_zone(zone)) \
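
The for_each_zone() and for_each_populated_zone() iterators walk every zone of every online node, skipping unpopulated zones in the latter case. A short sketch of the typical use, totalling buddy-managed pages system-wide:

#include <linux/mmzone.h>

/*
 * Sketch: sum the pages under buddy control across all populated
 * zones, similar in spirit to what the vmstat reporting code does.
 */
static unsigned long total_managed_pages(void)
{
	struct zone *zone;
	unsigned long pages = 0;

	for_each_populated_zone(zone)
		pages += zone_managed_pages(zone);

	return pages;
}
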
1119 static inline struct zone *zonelist_zone(struct zoneref *zoneref)
1121 return zoneref->zone;
1131 return zone_to_nid(zoneref->zone);
1139 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
1141 * @highest_zoneidx: The zone index of the highest zone to return
1144 * This function returns the next zone at or below a given zone index that is
1146 * search. The zoneref returned is a cursor that represents the current zone
1150 * Return: the next zone at or below highest_zoneidx within the allowed
1163 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
1164 * @zonelist: The zonelist to search for a suitable zone
1165 * @highest_zoneidx: The zone index of the highest zone to return
1168 * This function returns the first zone at or below a given zone index that is
1173 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
1177 * Return: Zoneref pointer for the first suitable zone found
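
first_zones_zonelist() is how the allocator establishes its preferred zoneref before scanning. A hedged sketch of resolving a preferred zone for a node; node_zonelist() comes from linux/gfp.h and is assumed here rather than shown in this listing:

#include <linux/gfp.h>
#include <linux/mmzone.h>

/*
 * Sketch: preferred zone for an allocation on node @nid limited to
 * @highest_zoneidx, ignoring any nodemask. Returns NULL when no
 * eligible zone exists, mirroring the zoneref->zone == NULL
 * convention documented above.
 */
static struct zone *preferred_zone_for(int nid, enum zone_type highest_zoneidx)
{
	struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
	struct zoneref *z;

	z = first_zones_zonelist(zonelist, highest_zoneidx, NULL);
	return zonelist_zone(z);
}
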
1188 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
1189 * @zone: The current zone in the iterator
1192 * @highidx: The zone index of the highest zone to return
1195 * This iterator iterates though all zones at or below a given zone index and
1198 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1199 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1200 zone; \
1202 zone = zonelist_zone(z))
1204 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
1205 for (zone = z->zone; \
1206 zone; \
1208 zone = zonelist_zone(z))
1212 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
1213 * @zone: The current zone in the iterator
1216 * @highidx: The zone index of the highest zone to return
1218 * This iterator iterates though all zones at or below a given zone index.
1220 #define for_each_zone_zonelist(zone, z, zlist, highidx) \
1221 for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
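
Combining the zonelist iterator with the watermark helpers gives roughly the shape of the allocator's zone scan. A sketch under that assumption; gfp_zone() and node_zonelist() live in linux/gfp.h, and the real fast path applies many more checks (cpusets, dirty limits, fragmentation handling):

#include <linux/gfp.h>
#include <linux/mmzone.h>

/*
 * Sketch: walk node @nid's zonelist, highest eligible zone first, and
 * return the first zone above its low watermark for an order-0
 * request. Purely illustrative; not a substitute for the page
 * allocator's get_page_from_freelist().
 */
static struct zone *first_zone_above_low_wmark(int nid, gfp_t gfp_mask)
{
	struct zonelist *zonelist = node_zonelist(nid, gfp_mask);
	enum zone_type highidx = gfp_zone(gfp_mask);
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, highidx) {
		unsigned long mark = low_wmark_pages(zone);

		if (zone_watermark_ok(zone, 0, mark, highidx, 0))
			return zone;
	}

	return NULL;
}
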
1284 /* See declaration of similar field in struct zone */