/Linux-v6.1/include/linux/

D | oom.h |
      13  struct zonelist;
      31  struct zonelist *zonelist;  member

D | mmzone.h |
    1070  struct zonelist {  struct
    1110  struct zonelist node_zonelists[MAX_ZONELISTS];
    1501  static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,  in first_zones_zonelist()  argument
    1505  return next_zones_zonelist(zonelist->_zonerefs,  in first_zones_zonelist()
    1548  struct zonelist *zonelist;  in movable_only_nodes()  local
    1561  zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];  in movable_only_nodes()
    1562  z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);  in movable_only_nodes()

D | swap.h |
     421  extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
D | gfp.h |
     165  static inline struct zonelist *node_zonelist(int nid, gfp_t flags)  in node_zonelist()
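Together, node_zonelist() above and the iterators from mmzone.h form the usual read path over a node's fallback zonelist. A minimal sketch of that pattern, assuming a hypothetical kernel-module context (the module name and the pr_info output are illustrative, not taken from the tree)::

    /* Sketch: walk the current node's GFP_KERNEL zonelist and log each
     * zone in fallback order. Error handling and cleanup elided. */
    #include <linux/module.h>
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/topology.h>

    static int __init zlwalk_init(void)
    {
        struct zonelist *zonelist;
        struct zoneref *z;
        struct zone *zone;

        /* GFP_KERNEL carries no __GFP_THISNODE, so node_zonelist()
         * returns this node's ZONELIST_FALLBACK list -- the same list
         * movable_only_nodes() indexes directly in mmzone.h above. */
        zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

        /* Visit zones at or below ZONE_NORMAL in fallback order. */
        for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL)
            pr_info("node %d zone %s\n", zone_to_nid(zone), zone->name);

        return 0;
    }

    static void __exit zlwalk_exit(void) { }

    module_init(zlwalk_init);
    module_exit(zlwalk_exit);
    MODULE_LICENSE("GPL");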
/Linux-v6.1/mm/

D | mm_init.c |
      34  struct zonelist *zonelist;  in mminit_verify_zonelist()  local
      43  zonelist = &pgdat->node_zonelists[listid];  in mminit_verify_zonelist()
      54  for_each_zone_zonelist(zone, z, zonelist, zoneid)  in mminit_verify_zonelist()
D | page_alloc.c |
    2925  struct zonelist *zonelist = ac->zonelist;  in unreserve_highatomic_pageblock()  local
    2933  for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,  in unreserve_highatomic_pageblock()
    4393  .zonelist = ac->zonelist,  in __alloc_pages_may_oom()
    4650  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,  in should_compact_retry()
    4758  progress = try_to_free_pages(ac->zonelist, order, gfp_mask,  in __perform_reclaim()
    4812  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,  in wake_all_kswapds()
    4951  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,  in should_reclaim_retry()
    5067  ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,  in __alloc_pages_slowpath()
    5078  struct zoneref *z = first_zones_zonelist(ac->zonelist,  in __alloc_pages_slowpath()
    5168  ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,  in __alloc_pages_slowpath()
    [all …]

D | vmscan.c |
    6300  static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)  in shrink_zones()  argument
    6321  for_each_zone_zonelist_nodemask(zone, z, zonelist,  in shrink_zones()
    6423  static unsigned long do_try_to_free_pages(struct zonelist *zonelist,  in do_try_to_free_pages()  argument
    6441  shrink_zones(zonelist, sc);  in do_try_to_free_pages()
    6458  for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,  in do_try_to_free_pages()
    6561  static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,  in throttle_direct_reclaim()  argument
    6599  for_each_zone_zonelist_nodemask(zone, z, zonelist,  in throttle_direct_reclaim()
    6641  unsigned long try_to_free_pages(struct zonelist *zonelist, int order,  in try_to_free_pages()  argument
    6670  if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))  in try_to_free_pages()
    6676  nr_reclaimed = do_try_to_free_pages(zonelist, &sc);  in try_to_free_pages()
    [all …]

D | internal.h |
     212  struct zonelist *zonelist;  member

D | oom_kill.c |
     270  if (!oc->zonelist)  in constrained_alloc()
     294  for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,  in constrained_alloc()

D | mempolicy.c |
    1933  struct zonelist *zonelist;  in mempolicy_slab_node()  local
    1935  zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];  in mempolicy_slab_node()
    1936  z = first_zones_zonelist(zonelist, highest_zoneidx,  in mempolicy_slab_node()

D | slab.c |
    3072  struct zonelist *zonelist;  in fallback_alloc()  local
    3086  zonelist = node_zonelist(mempolicy_slab_node(), flags);  in fallback_alloc()
    3093  for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {  in fallback_alloc()

D | compaction.c |
    2274  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,  in compaction_zonelist_suitable()
    2595  for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,  in try_to_compact_pages()

D | slub.c |
    2257  struct zonelist *zonelist;  in get_any_partial()  local
    2288  zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);  in get_any_partial()
    2289  for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {  in get_any_partial()
D | hugetlb.c |
    1171  struct zonelist *zonelist;  in dequeue_huge_page_nodemask()  local
    1176  zonelist = node_zonelist(nid, gfp_mask);  in dequeue_huge_page_nodemask()
    1180  for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {  in dequeue_huge_page_nodemask()
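The hugetlb hits show the nodemask-filtered variant of the walk: it is clamped to gfp_zone(gfp_mask) and skips zones whose node is not set in nmask. A minimal sketch of the same shape, again assuming a hypothetical kernel-module context (the chosen node set is illustrative)::

    /* Sketch: restrict a zonelist walk to an explicit node set,
     * mirroring the dequeue_huge_page_nodemask() pattern above. */
    #include <linux/module.h>
    #include <linux/gfp.h>
    #include <linux/mmzone.h>
    #include <linux/nodemask.h>

    static int __init nmwalk_init(void)
    {
        gfp_t gfp_mask = GFP_HIGHUSER_MOVABLE;
        struct zonelist *zonelist = node_zonelist(first_memory_node, gfp_mask);
        struct zoneref *z;
        struct zone *zone;
        nodemask_t nmask = NODE_MASK_NONE;

        node_set(first_memory_node, nmask); /* illustrative: one node only */

        /* Honour both the gfp-derived zone ceiling and the nodemask,
         * the same shape as the hugetlb hit at line 1180. */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(gfp_mask), &nmask)
            pr_info("candidate: node %d zone %s\n",
                    zone_to_nid(zone), zone->name);

        return 0;
    }

    static void __exit nmwalk_exit(void) { }

    module_init(nmwalk_init);
    module_exit(nmwalk_exit);
    MODULE_LICENSE("GPL");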
D | memcontrol.c |
    1688  .zonelist = NULL,  in mem_cgroup_out_of_memory()
/Linux-v6.1/Documentation/translations/zh_CN/mm/

D | numa.rst |
      57  one or more of the] builds an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a selected zone/node cannot satisfy the allocation request
      63  represents a relatively scarce resource. Linux selects a default Node ordered zonelist. This means that when using NUMA dist
/Linux-v6.1/Documentation/mm/

D | numa.rst |
      76  an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
      86  a default Node ordered zonelist. This means it tries to fallback to other zones
      91  Linux will attempt to allocate from the first node in the appropriate zonelist
      94  nodes' zones in the selected zonelist looking for the first zone in the list
     122  zonelist--will not be the node itself. Rather, it will be the node that the
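These numa.rst hits describe the default node-ordered fallback in prose: all of a node's own zones are tried before any remote node's, and remote nodes are visited in order of increasing NUMA distance. As a toy illustration of the resulting ordering, here is a small standalone C model; the node count, distance table, and zone names are made up, and this is not kernel code::

    /* Toy userspace model of a node-ordered zonelist: for each node,
     * list all of the local node's zones first, then the zones of
     * remote nodes in increasing NUMA-distance order. */
    #include <stdio.h>

    #define NR_NODES 2
    #define NR_ZONES 2

    static const char *zone_names[NR_ZONES] = { "Normal", "DMA" };
    /* Made-up symmetric distance table: self = 10, remote = 20. */
    static const int distance[NR_NODES][NR_NODES] = { { 10, 20 }, { 20, 10 } };

    static void print_zonelist(int nid)
    {
        int order[NR_NODES];
        int used[NR_NODES] = { 0 };

        /* Sort nodes by distance from nid; the local node comes first. */
        for (int i = 0; i < NR_NODES; i++) {
            int best = -1;
            for (int n = 0; n < NR_NODES; n++)
                if (!used[n] && (best < 0 ||
                    distance[nid][n] < distance[nid][best]))
                    best = n;
            used[best] = 1;
            order[i] = best;
        }

        printf("Node(%d) fallback zonelist:", nid);
        for (int i = 0; i < NR_NODES; i++)
            for (int z = 0; z < NR_ZONES; z++)
                printf(" Node(%d)/%s", order[i], zone_names[z]);
        printf("\n");
    }

    int main(void)
    {
        for (int n = 0; n < NR_NODES; n++)
            print_zonelist(n);
        return 0;
    }

Real zonelists are sparser than this toy output, since not every node instantiates every zone.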
/Linux-v6.1/drivers/tty/

D | sysrq.c |
     389  .zonelist = node_zonelist(first_memory_node, gfp_mask),  in moom_callback()

/Linux-v6.1/Documentation/admin-guide/sysctl/

D | vm.rst |
     648  In non-NUMA case, a zonelist for GFP_KERNEL is ordered as following.
     654  Assume 2 node NUMA and below is zonelist of Node(0)'s GFP_KERNEL::
/Linux-v6.1/Documentation/admin-guide/mm/

D | numa_memory_policy.rst |
     236  node zonelist.

/Linux-v6.1/Documentation/admin-guide/

D | kernel-parameters.txt |
    3860  numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.