Cross-reference of nr_node_ids in Linux v4.19: mm/page_alloc.c defines the symbol, include/linux/nodemask.h declares it, and the remaining hits size per-node arrays, bound loops over node IDs, or validate node numbers.

/Linux-v4.19/arch/arm64/mm/numa.c
     61  if (WARN_ON(node >= nr_node_ids))  in cpumask_of_node()
    114  if (nr_node_ids == MAX_NUMNODES)  in setup_node_to_cpumask_map()
    118  for (node = 0; node < nr_node_ids; node++) {  in setup_node_to_cpumask_map()
    124  pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);  in setup_node_to_cpumask_map()
    288  size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);  in numa_alloc_distance()
    297  numa_distance_cnt = nr_node_ids;  in numa_alloc_distance()
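The arm64 hits show the validate-then-index pattern: a node ID is checked against nr_node_ids before it is used as an array index. A minimal sketch of cpumask_of_node() reconstructed from the fragments above (the fallback masks are a reconstruction from memory, not guaranteed to be the file's exact code):

        const struct cpumask *cpumask_of_node(int node)
        {
                /* Reject IDs at or past nr_node_ids before indexing the map. */
                if (WARN_ON(node >= nr_node_ids))
                        return cpu_none_mask;

                /* Entry may not be populated yet; fall back to all online CPUs. */
                if (WARN_ON(node_to_cpumask_map[node] == NULL))
                        return cpu_online_mask;

                return node_to_cpumask_map[node];
        }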
/Linux-v4.19/kernel/irq/affinity.c
     47  masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);  in alloc_node_to_cpumask()
     51  for (node = 0; node < nr_node_ids; node++) {  in alloc_node_to_cpumask()
     69  for (node = 0; node < nr_node_ids; node++)  in free_node_to_cpumask()
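These hits pair an nr_node_ids-sized allocation with matching per-node setup and teardown loops. A sketch of the allocation side under that pattern (the zalloc_cpumask_var() unwind path is an assumption, not quoted from the file):

        static cpumask_var_t *alloc_node_to_cpumask(void)
        {
                cpumask_var_t *masks;
                int node;

                /* One cpumask slot per possible node ID. */
                masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
                if (!masks)
                        return NULL;

                for (node = 0; node < nr_node_ids; node++) {
                        if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
                                goto out_unwind;
                }
                return masks;

        out_unwind:
                /* Free only the masks that were successfully allocated. */
                while (--node >= 0)
                        free_cpumask_var(masks[node]);
                kfree(masks);
                return NULL;
        }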
/Linux-v4.19/kernel/sched/topology.c
    195  if (nr_node_ids == 1)  in sd_parent_degenerate()
   1249  for (i = 0; i < nr_node_ids; i++) {  in sched_numa_warn()
   1251  for (j = 0; j < nr_node_ids; j++)  in sched_numa_warn()
   1332  sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);  in sched_init_numa()
   1348  for (i = 0; i < nr_node_ids; i++) {  in sched_init_numa()
   1349  for (j = 0; j < nr_node_ids; j++) {  in sched_init_numa()
   1350  for (k = 0; k < nr_node_ids; k++) {  in sched_init_numa()
   1411  kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);  in sched_init_numa()
   1415  for (j = 0; j < nr_node_ids; j++) {  in sched_init_numa()
   1481  for (j = 0; j < nr_node_ids; j++) {  in sched_domains_numa_masks_set()
   [all …]
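sched_init_numa() treats the node-distance table as an nr_node_ids x nr_node_ids matrix. An illustrative sketch of scanning that matrix to collect the distinct distances, which is the idea behind the nested loops above (the function name and fixed-size local array are inventions for this example, not the scheduler's code):

        /* Count the distinct node_distance(i, j) values in the NxN matrix;
         * the scheduler builds one NUMA topology level per distinct distance. */
        static int __init count_numa_distances(void)
        {
                int distances[8];       /* illustrative bound, not the kernel's */
                int i, j, k, nr_levels = 0;

                for (i = 0; i < nr_node_ids; i++) {
                        for (j = 0; j < nr_node_ids; j++) {
                                int d = node_distance(i, j);
                                bool seen = false;

                                for (k = 0; k < nr_levels; k++)
                                        seen |= (distances[k] == d);
                                if (!seen && nr_levels < ARRAY_SIZE(distances))
                                        distances[nr_levels++] = d;
                        }
                }
                return nr_levels;
        }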
/Linux-v4.19/kernel/sched/fair.c
   1215  return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;  in task_faults_idx()
   2249  4*nr_node_ids*sizeof(unsigned long);  in task_numa_group()
   2262  nr_node_ids;  in task_numa_group()
   2264  for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)  in task_numa_group()
   2322  for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {  in task_numa_group()
   2354  for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)  in task_numa_free()
   2390  NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;  in task_numa_fault()
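The hit at line 1215 is the index flattening that makes all the other fair.c loop bounds work: a three-dimensional coordinate (stats bucket, node, private/shared flag) collapses into one flat array index, with nr_node_ids as the middle stride. NR_NUMA_HINT_FAULT_TYPES is 2 (private vs. shared) in this tree:

        /* Flat index = 2 * (bucket * nr_node_ids + nid) + priv, so each
         * node owns two adjacent slots within each stats bucket. */
        static inline int task_faults_idx(int s, int nid, int priv)
        {
                return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
        }

        /* Worked example: nr_node_ids = 4, s = 1, nid = 2, priv = 1
         *   -> 2 * (1 * 4 + 2) + 1 = 13                                 */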
/Linux-v4.19/arch/x86/mm/numa.c
    119  if (nr_node_ids == MAX_NUMNODES)  in setup_node_to_cpumask_map()
    123  for (node = 0; node < nr_node_ids; node++)  in setup_node_to_cpumask_map()
    127  pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);  in setup_node_to_cpumask_map()
    868  if (node >= nr_node_ids) {  in cpumask_of_node()
    871  node, nr_node_ids);  in cpumask_of_node()
/Linux-v4.19/arch/x86/kernel/setup_percpu.c
    173  NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);  in setup_per_cpu_areas()
/Linux-v4.19/include/linux/nodemask.h
    447  extern int nr_node_ids;
    488  #define nr_node_ids 1  (macro)
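nodemask.h gives nr_node_ids two forms: a runtime variable on multi-node configurations and a compile-time constant 1 otherwise, which lets single-node builds fold the per-node loops above away entirely. A sketch of that conditional pattern, assuming the surrounding MAX_NUMNODES > 1 guard used by this header:

        #if MAX_NUMNODES > 1
        /* Runtime value: number of node IDs in use, narrowed during boot. */
        extern int nr_node_ids;
        #else
        /* Single-node build: a constant lets the compiler drop node loops. */
        #define nr_node_ids     1
        #endif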
/Linux-v4.19/drivers/hv/hv.c
    193  hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),  in hv_synic_alloc()
/Linux-v4.19/drivers/hv/channel_mgmt.c
    634  if (next_node == nr_node_ids) {  in init_vp_index()
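init_vp_index() uses nr_node_ids as the wrap point for a round-robin node cursor. A generic sketch of that wrap, not the driver's exact logic (the helper name is an invention, and it assumes at least one online node so the loop terminates):

        /* Advance a persistent cursor over node IDs, wrapping to 0 when it
         * reaches nr_node_ids and skipping nodes that are not online. */
        static int pick_next_node(int *cursor)
        {
                int node = *cursor;

                for (;;) {
                        if (node == nr_node_ids)
                                node = 0;
                        if (node_online(node))
                                break;
                        node++;
                }
                *cursor = node + 1;
                return node;
        }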
/Linux-v4.19/mm/slab.h
    495  for (__node = 0; __node < nr_node_ids; __node++) \
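This hit is the body of a per-node iterator macro; in this tree it underpins for_each_kmem_cache_node(). A sketch of its shape (the get_node() pairing matches my reading of mm/slab.h, but treat the exact definition as a reconstruction):

        /* Visit every node for which the cache has a kmem_cache_node; the
         * trailing if() filters node IDs with no per-node structure. */
        #define for_each_kmem_cache_node(__s, __node, __n) \
                for (__node = 0; __node < nr_node_ids; __node++) \
                        if ((__n = get_node(__s, __node)))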
/Linux-v4.19/mm/ksm.c
   2937  buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),  in merge_across_nodes_store()
   2944  root_unstable_tree = buf + nr_node_ids;  in merge_across_nodes_store()
   2951  ksm_nr_node_ids = knob ? 1 : nr_node_ids;  in merge_across_nodes_store()
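Here a single kcalloc() carries two per-node arrays back to back: stable trees in the first nr_node_ids slots, unstable trees in the second. A sketch of that split (the helper function is hypothetical; the variable names follow the fragments above and KSM's naming):

        static struct rb_root *root_stable_tree;
        static struct rb_root *root_unstable_tree;

        /* One allocation, two logical arrays: slots [0, nr_node_ids) hold
         * the stable trees, [nr_node_ids, 2 * nr_node_ids) the unstable. */
        static int alloc_ksm_trees(void)
        {
                struct rb_root *buf;

                buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                root_stable_tree = buf;
                root_unstable_tree = buf + nr_node_ids;
                return 0;
        }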
/Linux-v4.19/mm/list_lru.c
    604  size_t size = sizeof(*lru->node) * nr_node_ids;  in __list_lru_init()
/Linux-v4.19/mm/vmalloc.c
   2665  memset(counters, 0, nr_node_ids * sizeof(unsigned int));  in show_numa_info()
   2742  nr_node_ids * sizeof(unsigned int), NULL);  in proc_vmalloc_init()
/Linux-v4.19/mm/slub.c
   4200  nr_node_ids * sizeof(struct kmem_cache_node *),  in kmem_cache_init()
   4219  nr_cpu_ids, nr_node_ids);  in kmem_cache_init()
   4754  nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);  in show_slab_objects()
   4833  for (node = 0; node < nr_node_ids; node++)  in show_slab_objects()
/Linux-v4.19/mm/compaction.c
   1893  if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {  in sysfs_compact_node()
/Linux-v4.19/mm/page_alloc.c
    288  int nr_node_ids __read_mostly = MAX_NUMNODES;
    290  EXPORT_SYMBOL(nr_node_ids);
   6484  nr_node_ids = highest + 1;  in setup_nr_node_ids()
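This is the defining site: nr_node_ids starts at the compile-time ceiling MAX_NUMNODES and is narrowed once the possible-node map is known, so every per-node array sized from it stays small. A sketch of that boot-time narrowing (the find_last_bit() scan matches my reading of the v4.19 function, but treat it as a reconstruction):

        int nr_node_ids __read_mostly = MAX_NUMNODES;
        EXPORT_SYMBOL(nr_node_ids);

        /* Shrink nr_node_ids from the compile-time ceiling to the highest
         * possible node ID plus one. */
        void __init setup_nr_node_ids(void)
        {
                unsigned int highest;

                highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
                nr_node_ids = highest + 1;
        }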
/Linux-v4.19/mm/slab.c
    690  size_t memsize = sizeof(void *) * nr_node_ids;  in alloc_alien_cache()
   1281  nr_node_ids * sizeof(struct kmem_cache_node *),  in kmem_cache_init()
/Linux-v4.19/mm/memcontrol.c
    728  VM_BUG_ON((unsigned)nid >= nr_node_ids);  in mem_cgroup_node_nr_lru_pages()
   4428  size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);  in mem_cgroup_alloc()
/Linux-v4.19/mm/vmscan.c
    375  size *= nr_node_ids;  in prealloc_shrinker()
/Linux-v4.19/arch/powerpc/mm/numa.c
     80  if (nr_node_ids == MAX_NUMNODES)  in setup_node_to_cpumask_map()
     88  dbg("Node to cpumask map for %d nodes\n", nr_node_ids);  in setup_node_to_cpumask_map()
/Linux-v4.19/kernel/workqueue.c
   3732  ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);  in apply_wqattrs_prepare()
   4074  tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);  in __alloc_workqueue_key()
   5656  tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);  in wq_numa_init()
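The hit at line 3732 sizes a structure ending in a per-node flexible array member with a single overflow-checked allocation via struct_size(). A generic sketch of that idiom (the struct and field names are illustrative, not the workqueue's):

        #include <linux/overflow.h>
        #include <linux/slab.h>

        /* Illustrative type: a header plus a flexible array with one slot
         * per possible node ID. */
        struct node_table {
                unsigned int    flags;
                void            *per_node[];
        };

        static struct node_table *alloc_node_table(void)
        {
                struct node_table *tbl;

                /* struct_size() computes sizeof(*tbl) plus
                 * nr_node_ids * sizeof(void *) with overflow checking, so
                 * header and array share one allocation. */
                tbl = kzalloc(struct_size(tbl, per_node, nr_node_ids), GFP_KERNEL);
                return tbl;
        }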
/Linux-v4.19/net/sunrpc/svc.c
    195  unsigned int maxpools = nr_node_ids;  in svc_pool_map_init_pernode()
/Linux-v4.19/drivers/net/ethernet/cavium/thunder/nic_main.c
   1226  if (nr_node_ids > 1)  in nic_num_sqs_en()
/Linux-v4.19/drivers/char/random.c
    823  pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);  in do_numa_crng_init()
/Linux-v4.19/kernel/bpf/syscall.c
    505  ((unsigned int)numa_node >= nr_node_ids ||  in map_create()
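map_create() range-checks a user-supplied NUMA node against nr_node_ids before honoring it. A sketch of the check around the fragment above (the NUMA_NO_NODE and node_online() parts are reconstructed from memory of this function, so treat them as assumptions):

        /* Cast to unsigned so a negative numa_node also fails the range
         * check; an in-range but offline node is rejected as well. */
        if (numa_node != NUMA_NO_NODE &&
            ((unsigned int)numa_node >= nr_node_ids ||
             !node_online(numa_node)))
                return -EINVAL;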
|