| /Linux-v5.4/include/linux/ |
| D | nodemask.h | 400  N_MEMORY, /* The node has memory(regular, high, movable) */  enumerator
|   |            | 437  #define first_memory_node first_node(node_states[N_MEMORY])
|   |            | 444  return next_node(nid, node_states[N_MEMORY]);  in next_memory_node()
|
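
The nodemask.h hits above are the core of the interface: N_MEMORY indexes the global node_states[] array of nodemask_t bitmaps, and first_memory_node (line 437) plus next_memory_node() (line 444) walk the memory-node bitmap. A minimal sketch, assuming an out-of-tree module with illustrative names (nothing below is code from the tree), that enumerates the nodes currently carrying memory:

    /* Sketch only: list the nodes that currently have memory, using the
     * node_states[N_MEMORY] bitmap from <linux/nodemask.h>. */
    #include <linux/module.h>
    #include <linux/nodemask.h>

    static int __init memnodes_init(void)
    {
        int nid;

        pr_info("%d node(s) have memory\n", num_node_state(N_MEMORY));

        /* The iterator macro used throughout mm/ ... */
        for_each_node_state(nid, N_MEMORY)
            pr_info("node %d has memory\n", nid);

        /* ... or the explicit walk; next_node() on node_states[N_MEMORY]
         * is exactly what next_memory_node() wraps (nodemask.h:444). */
        for (nid = first_memory_node; nid < MAX_NUMNODES;
             nid = next_node(nid, node_states[N_MEMORY]))
            pr_info("node %d has memory (explicit walk)\n", nid);

        return 0;
    }

    static void __exit memnodes_exit(void)
    {
    }

    module_init(memnodes_init);
    module_exit(memnodes_exit);
    MODULE_LICENSE("GPL");
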
| D | cpuset.h | 199 #define cpuset_current_mems_allowed (node_states[N_MEMORY])
|
| /Linux-v5.4/drivers/base/ |
| D | node.c | 567  node_state(node->dev.id, N_MEMORY)) {  in hugetlb_register_node()
|   |        | 978  [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
|   |        | 989  &node_state_attr[N_MEMORY].attr.attr,
|
| /Linux-v5.4/mm/ |
| D | page_isolation.c | 322 return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]); in alloc_migrate_target()
|
| D | memory_hotplug.c | 648   if (!node_state(nid, N_MEMORY))  in node_states_check_changes_online()
|   |                  | 667   node_set_state(node, N_MEMORY);  in node_states_set_node()
|   |                  | 1290  nodemask_t nmask = node_states[N_MEMORY];  in new_node_page()
|   |                  | 1478  node_clear_state(node, N_MEMORY);  in node_states_clear_node()
|
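
memory_hotplug.c is where the bit is maintained: node_states_set_node() sets N_MEMORY when a node gains its first memory, node_states_clear_node() clears it when the last memory goes away, and new_node_page() (line 1290) snapshots the mask to pick migration targets. A sketch of that snapshot-and-exclude pattern; build_target_mask() is a hypothetical helper, not a kernel symbol:

    /* Sketch of the pattern around memory_hotplug.c:1290: copy the
     * memory-node mask, drop the node being offlined, and fall back to it
     * only if no other node has memory. */
    #include <linux/nodemask.h>

    static void build_target_mask(int offlining_nid, nodemask_t *out)
    {
        nodemask_t nmask = node_states[N_MEMORY];   /* local snapshot */

        node_clear(offlining_nid, nmask);           /* avoid this node */
        if (nodes_empty(nmask))                     /* last memory node? */
            node_set(offlining_nid, nmask);         /* then reuse it */

        *out = nmask;
    }
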
| D | page_ext.c | 374 for_each_node_state(nid, N_MEMORY) { in page_ext_init()
|
| D | page_alloc.c | 125   [N_MEMORY] = { { [0] = 1UL } },
|   |              | 1942  atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));  in page_alloc_init_late()
|   |              | 1943  for_each_node_state(nid, N_MEMORY) {  in page_alloc_init_late()
|   |              | 1971  for_each_node_state(nid, N_MEMORY)  in page_alloc_init_late()
|   |              | 5550  for_each_node_state(n, N_MEMORY) {  in find_next_best_node()
|   |              | 7086  node_set_state(nid, N_MEMORY);  in early_calculate_totalpages()
|   |              | 7103  nodemask_t saved_node_state = node_states[N_MEMORY];  in find_zone_movable_pfns_for_nodes()
|   |              | 7105  int usable_nodes = nodes_weight(node_states[N_MEMORY]);  in find_zone_movable_pfns_for_nodes()
|   |              | 7208  for_each_node_state(nid, N_MEMORY) {  in find_zone_movable_pfns_for_nodes()
|   |              | 7301  node_states[N_MEMORY] = saved_node_state;  in find_zone_movable_pfns_for_nodes()
|   |              | [all …]
|
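
In page_alloc.c, the initializer at line 125 marks node 0 as having memory at boot, and find_zone_movable_pfns_for_nodes() saves node_states[N_MEMORY] (line 7103), counts the usable nodes with nodes_weight() (line 7105), and restores the mask when it finishes (line 7301). A sketch of that save/restore pattern only, assuming the work in between may clear bits in the global mask:

    /* Sketch of the save/restore seen at page_alloc.c:7103/7301. */
    #include <linux/kernel.h>
    #include <linux/nodemask.h>

    static void zone_movable_sizing_sketch(void)
    {
        nodemask_t saved_node_state = node_states[N_MEMORY];
        int usable_nodes = nodes_weight(node_states[N_MEMORY]);

        pr_info("%d usable memory node(s)\n", usable_nodes);

        /* ... boot-time sizing work that may drop nodes from the mask ... */

        /* Put the global mask back exactly as it was. */
        node_states[N_MEMORY] = saved_node_state;
    }
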
| D | mempolicy.c | 213   cpuset_current_mems_allowed, node_states[N_MEMORY]);  in mpol_set_nodemask()
|   |             | 2738  for_each_node_state(nid, N_MEMORY) {  in numa_policy_init()
|   |             | 2810  if (!nodes_subset(nodes, node_states[N_MEMORY]))  in mpol_parse_str()
|   |             | 2840  nodes = node_states[N_MEMORY];  in mpol_parse_str()
|
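
mempolicy.c line 2810 shows the usual validation step: a user-supplied nodemask is only accepted if it is a subset of the nodes that actually have memory. A sketch of that check; check_requested_nodes() is an illustrative wrapper, not a kernel function:

    /* Sketch of the subset test used by mpol_parse_str() (mempolicy.c:2810). */
    #include <linux/errno.h>
    #include <linux/nodemask.h>

    static int check_requested_nodes(const nodemask_t *requested)
    {
        if (nodes_empty(*requested))
            return -EINVAL;

        /* Every requested node must currently have memory. */
        if (!nodes_subset(*requested, node_states[N_MEMORY]))
            return -EINVAL;

        return 0;
    }
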
| D | hugetlb.c | 1895  if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))  in return_unused_surplus_pages()
|   |           | 2175  for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {  in __alloc_bootmem_huge_page()
|   |           | 2266  &node_states[N_MEMORY],  in hugetlb_hstate_alloc_pages()
|   |           | 2561  n_mask = &node_states[N_MEMORY];  in __nr_hugepages_store_common()
|   |           | 2868  for_each_node_state(nid, N_MEMORY) {  in hugetlb_register_all_nodes()
|   |           | 3162  for_each_node_state(nid, N_MEMORY)  in hugetlb_show_meminfo()
|
| D | oom_kill.c | 287 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { in constrained_alloc()
|
| D | vmstat.c | 1518  if (!node_state(pgdat->node_id, N_MEMORY))  in pagetypeinfo_show()
|   |          | 2055  if (!node_state(pgdat->node_id, N_MEMORY))  in unusable_show()
|
| D | compaction.c | 2703  for_each_node_state(nid, N_MEMORY) {  in kcompactd_cpu_online()
|   |              | 2729  for_each_node_state(nid, N_MEMORY)  in kcompactd_init()
|
| D | memcontrol.c | 1646  memcg->scan_nodes = node_states[N_MEMORY];  in mem_cgroup_may_update_nodemask()
|   |              | 1648  for_each_node_mask(nid, node_states[N_MEMORY]) {  in mem_cgroup_may_update_nodemask()
|   |              | 3802  for_each_node_state(nid, N_MEMORY) {  in memcg_numa_stat_show()
|   |              | 3817  for_each_node_state(nid, N_MEMORY) {  in memcg_numa_stat_show()
|
| D | memory-failure.c | 1617 return new_page_nodemask(p, nid, &node_states[N_MEMORY]); in new_page()
|
| D | vmscan.c | 4062  for_each_node_state(nid, N_MEMORY) {  in kswapd_cpu_online()
|   |          | 4117  for_each_node_state(nid, N_MEMORY)  in kswapd_init()
|
| D | migrate.c | 1620 if (!node_state(node, N_MEMORY)) in do_pages_move()
|
| /Linux-v5.4/kernel/cgroup/ |
| D | cpuset.c | 412   while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))  in guarantee_online_mems()
|   |          | 414   nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);  in guarantee_online_mems()
|   |          | 3136  new_mems = node_states[N_MEMORY];  in cpuset_hotplug_workfn()
|   |          | 3252  top_cpuset.mems_allowed = node_states[N_MEMORY];  in cpuset_init_smp()
|   |          | 3256  top_cpuset.effective_mems = node_states[N_MEMORY];  in cpuset_init_smp()
|
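
cpuset.c's guarantee_online_mems() (lines 412-414) walks up the cpuset hierarchy until the effective mems intersect node_states[N_MEMORY], then ANDs the two masks. A sketch of just the masking step, with effective_mems_sketch() as an illustrative name and the hierarchy walk reduced to a simple fallback:

    /* Sketch of the clamp in guarantee_online_mems() (cpuset.c:412-414). */
    #include <linux/nodemask.h>

    static void effective_mems_sketch(const nodemask_t *configured, nodemask_t *out)
    {
        if (nodes_intersects(*configured, node_states[N_MEMORY]))
            nodes_and(*out, *configured, node_states[N_MEMORY]);
        else
            *out = node_states[N_MEMORY];   /* nothing usable: allow all memory nodes */
    }
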
| /Linux-v5.4/Documentation/core-api/ |
| D | memory-hotplug.rst | 70 - status_change_nid is set node id when N_MEMORY of nodemask is (will be)
|
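
The memory-hotplug.rst line above describes status_change_nid: the notifier argument carries the node id whose N_MEMORY bit is being set or cleared by the hotplug operation, and -1 when no node changes state (the transitions themselves are done by node_states_set_node()/node_states_clear_node() in memory_hotplug.c above). A sketch of a callback consuming it, assuming an out-of-tree module; register_memory_notifier() and struct memory_notify come from <linux/memory.h>:

    /* Sketch only: react when a node gains its first or loses its last memory. */
    #include <linux/module.h>
    #include <linux/memory.h>
    #include <linux/nodemask.h>

    static int memnode_callback(struct notifier_block *nb,
                                unsigned long action, void *arg)
    {
        struct memory_notify *mn = arg;

        /* -1 means this operation changes no node's N_MEMORY bit. */
        if (mn->status_change_nid < 0)
            return NOTIFY_OK;

        switch (action) {
        case MEM_ONLINE:
            pr_info("node %d gained its first memory\n", mn->status_change_nid);
            break;
        case MEM_OFFLINE:
            pr_info("node %d lost its last memory\n", mn->status_change_nid);
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block memnode_nb = {
        .notifier_call = memnode_callback,
    };

    static int __init memnode_init(void)
    {
        return register_memory_notifier(&memnode_nb);
    }

    static void __exit memnode_exit(void)
    {
        unregister_memory_notifier(&memnode_nb);
    }

    module_init(memnode_init);
    module_exit(memnode_exit);
    MODULE_LICENSE("GPL");
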
| /Linux-v5.4/fs/proc/ |
| D | task_mmu.c | 1711  if (!node_isset(nid, node_states[N_MEMORY]))  in can_gather_numa_stats()
|   |            | 1736  if (!node_isset(nid, node_states[N_MEMORY]))  in can_gather_numa_stats_pmd()
|   |            | 1883  for_each_node_state(nid, N_MEMORY)  in show_numa_map()
|
| D | kcore.c | 244 for_each_node_state(nid, N_MEMORY) { in kcore_ram_list()
|
| /Linux-v5.4/init/ |
| D | main.c | 1171 set_mems_allowed(node_states[N_MEMORY]); in kernel_init_freeable()
|
| /Linux-v5.4/arch/x86/mm/ |
| D | init_64.c | 820  node_clear_state(0, N_MEMORY);  in paging_init()
|   |           | 821  if (N_MEMORY != N_NORMAL_MEMORY)  in paging_init()
|
| /Linux-v5.4/kernel/ |
| D | kthread.c | 576 set_mems_allowed(node_states[N_MEMORY]); in kthreadd()
|
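
Both kernel_init_freeable() (init/main.c:1171) and kthreadd() (kernel/kthread.c:576) call set_mems_allowed(node_states[N_MEMORY]) so the early kernel threads may allocate from any node that has memory. A sketch of a kernel thread doing the same; worker_fn() is an illustrative name, and the thread would be started with kthread_run():

    /* Sketch only: widen this thread's allowed-memory mask to every node
     * with memory, mirroring kthreadd() and kernel_init_freeable(). */
    #include <linux/cpuset.h>
    #include <linux/kthread.h>
    #include <linux/nodemask.h>
    #include <linux/sched.h>

    static int worker_fn(void *data)
    {
        set_mems_allowed(node_states[N_MEMORY]);

        while (!kthread_should_stop()) {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
    }
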
| /Linux-v5.4/Documentation/admin-guide/cgroup-v1/ |
| D | cpusets.rst | 223 automatically tracks the value of node_states[N_MEMORY]--i.e.,
|