Lines Matching full:node
33 * Track per-node information needed to set up the boot memory allocator, the
34 * per-node areas, and the real VM.
50 * To prevent cache aliasing effects, align per-node structures so that they
51 * start at addresses that are strided by node number.
54 #define NODEDATA_ALIGN(addr, node) \ argument
56 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
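The two macro lines above add a node-dependent offset on top of a base alignment so that different nodes' per-node structures land on different cache lines. Below is a minimal, self-contained sketch of the same arithmetic; PERCPU_PAGE_SIZE, MAX_NODE_ALIGN_OFFSET and the 1 MiB base alignment are placeholder values chosen for illustration, not the kernel's actual constants.

    #include <stdio.h>

    /* Placeholder constants, for illustration only. */
    #define PERCPU_PAGE_SIZE      (64UL * 1024)
    #define MAX_NODE_ALIGN_OFFSET (32UL * 1024 * 1024)
    #define BASE_ALIGN            (1UL * 1024 * 1024)

    /* Round addr up to BASE_ALIGN, then stride it by node number so that
     * per-node data of different nodes does not alias in the caches. */
    static unsigned long nodedata_align(unsigned long addr, int node)
    {
        unsigned long base = (addr + BASE_ALIGN - 1) & ~(BASE_ALIGN - 1);
        return base + ((node * PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1));
    }

    int main(void)
    {
        for (int node = 0; node < 4; node++)
            printf("node %d -> %#lx\n", node, nodedata_align(0x100000, node));
        return 0;
    }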
59 * build_node_maps - callback to set up mem_data structs for each node
62 * @node: node where this range resides
65 * treat as a virtually contiguous block (i.e. each node). Each such block
71 int node) in build_node_maps() argument
78 if (!mem_data[node].min_pfn) { in build_node_maps()
79 mem_data[node].min_pfn = spfn; in build_node_maps()
80 mem_data[node].max_pfn = epfn; in build_node_maps()
82 mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn); in build_node_maps()
83 mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn); in build_node_maps()
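build_node_maps() is called once for every usable memory range; the lines above are the merge step that grows the owning node's [min_pfn, max_pfn) span to cover the new range. Here is a self-contained sketch of that bookkeeping, using a simplified mem_data[] and an assumed 16 KiB page size (the real callback also granule-aligns the range first).

    #include <stdio.h>

    #define MAX_NUMNODES 4
    #define PAGE_SHIFT   14          /* assumed page size for illustration */

    struct node_span {
        unsigned long min_pfn, max_pfn;   /* 0/0 means nothing seen yet */
    };

    static struct node_span mem_data[MAX_NUMNODES];

    /* Record one physical range on @node, widening the node's pfn span. */
    static void note_node_range(unsigned long start, unsigned long len, int node)
    {
        unsigned long spfn = start >> PAGE_SHIFT;
        unsigned long epfn = (start + len) >> PAGE_SHIFT;

        if (!mem_data[node].min_pfn) {
            mem_data[node].min_pfn = spfn;
            mem_data[node].max_pfn = epfn;
        } else {
            if (spfn < mem_data[node].min_pfn)
                mem_data[node].min_pfn = spfn;
            if (epfn > mem_data[node].max_pfn)
                mem_data[node].max_pfn = epfn;
        }
    }

    int main(void)
    {
        note_node_range(0x04000000, 0x02000000, 1);   /* two discontiguous  */
        note_node_range(0x10000000, 0x01000000, 1);   /* ranges on node 1   */
        printf("node 1 pfns: [%#lx, %#lx)\n",
               mem_data[1].min_pfn, mem_data[1].max_pfn);
        return 0;
    }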
90 * early_nr_cpus_node - return number of cpus on a given node
91 * @node: node to check
93 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
95 * called yet. Note that node 0 will also count all non-existent cpus.
97 static int __meminit early_nr_cpus_node(int node) in early_nr_cpus_node() argument
102 if (node == node_cpuid[cpu].nid) in early_nr_cpus_node()
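At this point in boot the only cpu-to-node mapping available is the node_cpuid[] table, so counting a node's cpus is a plain scan of it; entries that were never filled keep nid 0, which is why node 0 also counts non-existent cpus. A small sketch with a hypothetical table:

    #include <stdio.h>

    #define NR_CPUS 8

    /* Hypothetical boot-time table: the node each possible cpu sits on. */
    static struct { int nid; } node_cpuid[NR_CPUS] = {
        {0}, {0}, {1}, {1}, {0}, {1}, {0}, {0},
    };

    /* Count cpus on @node by scanning the boot-time table. */
    static int early_cpus_on(int node)
    {
        int cpu, n = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (node_cpuid[cpu].nid == node)
                n++;
        return n;
    }

    int main(void)
    {
        printf("node 0: %d cpus, node 1: %d cpus\n",
               early_cpus_on(0), early_cpus_on(1));
        return 0;
    }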
110 * @node: the node id.
112 static unsigned long __meminit compute_pernodesize(int node) in compute_pernodesize() argument
116 cpus = early_nr_cpus_node(node); in compute_pernodesize()
118 pernodesize += node * L1_CACHE_BYTES; in compute_pernodesize()
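compute_pernodesize() totals everything that must live in a node's local chunk: the per-cpu areas for the node's cpus, the node's pg_data_t and node-local data, plus the node * L1_CACHE_BYTES skew shown above. A rough sketch of that accumulation follows; the stand-in struct sizes, page size and alignment policy are assumptions, not the kernel's real layout.

    #include <stdio.h>

    #define PERCPU_PAGE_SIZE (64UL * 1024)   /* assumed */
    #define PAGE_SIZE        (16UL * 1024)   /* assumed */
    #define L1_CACHE_BYTES   128UL           /* assumed */

    struct pg_data_stub   { char pad[4096]; };   /* stand-in for pg_data_t      */
    struct node_data_stub { char pad[256];  };   /* stand-in for node-local data */

    static unsigned long page_align(unsigned long x)
    {
        return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    }

    /* Bytes of node-local space needed by @node with @cpus local cpus. */
    static unsigned long pernode_size(int node, int cpus)
    {
        unsigned long size = 0;

        size += PERCPU_PAGE_SIZE * cpus;     /* per-cpu areas        */
        size += node * L1_CACHE_BYTES;       /* cache-colouring skew */
        size += page_align(sizeof(struct pg_data_stub));
        size += page_align(sizeof(struct node_data_stub));
        return page_align(size);
    }

    int main(void)
    {
        printf("node 1, 4 cpus -> %lu bytes\n", pernode_size(1, 4));
        return 0;
    }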
127 * per_cpu_node_setup - set up per-cpu areas on each node
128 * @cpu_data: per-cpu area on this node
129 * @node: node to setup
132 * set up __per_cpu_offset for each CPU on this node. Return a pointer to
135 static void *per_cpu_node_setup(void *cpu_data, int node) in per_cpu_node_setup() argument
143 if (node != node_cpuid[cpu].nid) in per_cpu_node_setup()
154 * area for cpu0 is on the correct node and its in per_cpu_node_setup()
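per_cpu_node_setup() copies the static per-cpu template into the node-local chunk for each cpu that belongs to this node and records where each cpu's area ended up; the fragment above belongs to the comment about keeping cpu0's area special. A simplified userspace analog, where percpu_template stands in for the kernel's static per-cpu section and per_cpu_area[] for the recorded locations:

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS          4
    #define PERCPU_AREA_SIZE 1024     /* assumed per-cpu area size */

    static struct { int nid; } node_cpuid[NR_CPUS] = { {0}, {0}, {1}, {1} };

    static char percpu_template[PERCPU_AREA_SIZE]; /* stand-in for the static per-cpu section */
    static char *per_cpu_area[NR_CPUS];            /* where each cpu's copy landed            */

    /* Carve per-cpu areas for @node's cpus out of @cpu_data and return the
     * first unused byte, mirroring the "return a pointer to the end" contract. */
    static void *percpu_setup_for_node(void *cpu_data, int node)
    {
        char *p = cpu_data;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (node_cpuid[cpu].nid != node)
                continue;
            memcpy(p, percpu_template, PERCPU_AREA_SIZE);
            per_cpu_area[cpu] = p;
            p += PERCPU_AREA_SIZE;
        }
        return p;
    }

    int main(void)
    {
        static char node1_chunk[2 * PERCPU_AREA_SIZE];
        void *end = percpu_setup_for_node(node1_chunk, 1);

        printf("node 1 used %td bytes; cpu 2's area is at offset %td\n",
               (char *)end - node1_chunk, per_cpu_area[2] - node1_chunk);
        return 0;
    }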
188 int node, prev_node, unit, nr_units; in setup_per_cpu_areas() local
202 /* build cpu_map, units are grouped by node */ in setup_per_cpu_areas()
204 for_each_node(node) in setup_per_cpu_areas()
206 if (node == node_cpuid[cpu].nid) in setup_per_cpu_areas()
226 * CPUs are put into groups according to node. Walk cpu_map in setup_per_cpu_areas()
227 * and create new groups at node boundaries. in setup_per_cpu_areas()
233 node = node_cpuid[cpu].nid; in setup_per_cpu_areas()
235 if (node == prev_node) { in setup_per_cpu_areas()
239 prev_node = node; in setup_per_cpu_areas()
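The setup_per_cpu_areas() fragments above are the two passes of its grouping logic: first cpu_map[] is filled so cpus of the same node sit next to each other, then a second walk over cpu_map opens a new percpu group whenever the node changes. A compact sketch of just that grouping, with a plain group_of[] array standing in for the percpu allocation info:

    #include <stdio.h>

    #define NR_CPUS      6
    #define MAX_NUMNODES 3

    static struct { int nid; } node_cpuid[NR_CPUS] = {
        {1}, {0}, {2}, {0}, {1}, {2},
    };

    int main(void)
    {
        int cpu_map[NR_CPUS];
        int group_of[NR_CPUS];           /* group index per unit (illustration) */
        int unit = 0, nr_groups = 0, prev_node = -1;

        /* Pass 1: build cpu_map, units grouped by node. */
        for (int node = 0; node < MAX_NUMNODES; node++)
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (node_cpuid[cpu].nid == node)
                    cpu_map[unit++] = cpu;

        /* Pass 2: walk cpu_map and start a new group at each node boundary. */
        for (int i = 0; i < unit; i++) {
            int node = node_cpuid[cpu_map[i]].nid;

            if (node != prev_node) {
                nr_groups++;
                prev_node = node;
            }
            group_of[i] = nr_groups - 1;
            printf("unit %d: cpu %d (node %d) -> group %d\n",
                   i, cpu_map[i], node, group_of[i]);
        }
        return 0;
    }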
254 * @node: the node id.
258 static void __init fill_pernode(int node, unsigned long pernode, in fill_pernode() argument
262 int cpus = early_nr_cpus_node(node); in fill_pernode()
264 mem_data[node].pernode_addr = pernode; in fill_pernode()
265 mem_data[node].pernode_size = pernodesize; in fill_pernode()
270 pernode += node * L1_CACHE_BYTES; in fill_pernode()
272 pgdat_list[node] = __va(pernode); in fill_pernode()
275 mem_data[node].node_data = __va(pernode); in fill_pernode()
279 cpu_data = per_cpu_node_setup(cpu_data, node); in fill_pernode()
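fill_pernode() records the chunk's address and size and then walks a cursor through it: the per-cpu areas for this node's cpus come first, then the node * L1_CACHE_BYTES skew, the node's pg_data_t (published through pgdat_list[]), and the node-local data copy; per_cpu_node_setup() then populates the per-cpu portion. A pointer-arithmetic sketch of that layout over an ordinary buffer; the stand-in types and the page alignment used here are illustrative, not the kernel's exact alignment rules.

    #include <stdio.h>
    #include <stdint.h>

    #define PERCPU_AREA_SIZE (64UL * 1024)   /* assumed */
    #define L1_CACHE_BYTES   128UL
    #define PAGE_SIZE        (16UL * 1024)   /* assumed */

    struct pg_data_stub   { char pad[4096]; };
    struct node_data_stub { char pad[256];  };

    struct pernode_layout {
        uintptr_t             pernode_addr;
        unsigned long         pernode_size;
        void                  *cpu_area;     /* per-cpu areas start here */
        struct pg_data_stub   *pgdat;
        struct node_data_stub *node_data;
    };

    static uintptr_t page_align(uintptr_t x)
    {
        return (x + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1);
    }

    /* Walk a cursor through the node-local chunk at @base, handing out pieces. */
    static void lay_out_pernode(struct pernode_layout *l, int node, int cpus,
                                uintptr_t base, unsigned long size)
    {
        uintptr_t p = base;

        l->pernode_addr = base;
        l->pernode_size = size;

        l->cpu_area = (void *)p;                 /* per-cpu areas come first */
        p += PERCPU_AREA_SIZE * cpus;
        p += node * L1_CACHE_BYTES;              /* cache-colouring skew     */
        l->pgdat = (struct pg_data_stub *)p;     /* node's pg_data_t         */
        p += page_align(sizeof(struct pg_data_stub));
        l->node_data = (struct node_data_stub *)p;
    }

    int main(void)
    {
        static char chunk[1 << 20];
        struct pernode_layout l;

        lay_out_pernode(&l, 1, 2, (uintptr_t)chunk, sizeof(chunk));
        printf("cpu area at +%td, pgdat at +%td, node_data at +%td\n",
               (char *)l.cpu_area - chunk, (char *)l.pgdat - chunk,
               (char *)l.node_data - chunk);
        return 0;
    }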
285 * find_pernode_space - allocate memory for memory map and per-node structures
288 * @node: node where this range resides
291 * pg_data_ts and the per-node data struct. Each node will have something like
296 * |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
298 * | cpus_on_this_node | Node 0 will also have entries for all non-existent cpus.
313 int node) in find_pernode_space() argument
322 * Make sure this memory falls within this node's usable memory in find_pernode_space()
325 if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn) in find_pernode_space()
328 /* Don't set up this node's local space twice... */ in find_pernode_space()
329 if (mem_data[node].pernode_addr) in find_pernode_space()
336 pernodesize = compute_pernodesize(node); in find_pernode_space()
337 pernode = NODEDATA_ALIGN(start, node); in find_pernode_space()
341 fill_pernode(node, pernode, pernodesize); in find_pernode_space()
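find_pernode_space() runs over the same memory ranges as build_node_maps(): for a range that lies inside a node whose local chunk has not been placed yet, it computes the needed size, picks an aligned start, and (if the result still fits in the range) lets fill_pernode() carve it up. Below is a condensed, self-contained sketch of those checks; the stub size and alignment helpers stand in for compute_pernodesize()/NODEDATA_ALIGN() and use made-up values.

    #include <stdio.h>

    #define PAGE_SHIFT 14                    /* assumed 16 KiB pages */

    struct node_mem {
        unsigned long min_pfn, max_pfn;      /* span from build_node_maps      */
        unsigned long pernode_addr;          /* 0 until the local chunk exists */
        unsigned long pernode_size;
    };

    /* Stand-ins for the real helpers; values are purely illustrative. */
    static unsigned long pernodesize_stub(int node) { (void)node; return 1UL << 20; }
    static unsigned long align_stub(unsigned long a, int node)
    {
        return ((a + 0xfffff) & ~0xfffffUL) + node * 0x10000UL;
    }

    /* Place @node's local chunk inside [start, start+len) if it fits and the
     * node has not been set up yet. */
    static void place_pernode(struct node_mem *nd, unsigned long start,
                              unsigned long len, int node)
    {
        unsigned long spfn = start >> PAGE_SHIFT;
        unsigned long epfn = (start + len) >> PAGE_SHIFT;
        unsigned long size, base, end = start + len;

        /* The range must fall inside this node's usable memory ... */
        if (spfn < nd->min_pfn || epfn > nd->max_pfn)
            return;
        /* ... and the node's local space must not be set up twice. */
        if (nd->pernode_addr)
            return;

        size = pernodesize_stub(node);
        base = align_stub(start, node);
        if (base + size <= end) {            /* does it fit in this range?   */
            nd->pernode_addr = base;         /* fill_pernode() would run now */
            nd->pernode_size = size;
        }
    }

    int main(void)
    {
        struct node_mem nd = { .min_pfn = 0x1000, .max_pfn = 0x9000 };

        place_pernode(&nd, 0x1000UL << PAGE_SHIFT, 0x8000UL << PAGE_SHIFT, 1);
        printf("pernode chunk at %#lx, %lu bytes\n",
               nd.pernode_addr, nd.pernode_size);
        return 0;
    }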
347 * reserve_pernode_space - reserve memory for per-node space
349 * Reserve the space used by the bootmem maps & per-node space in the boot
356 int node; in reserve_pernode_space() local
358 for_each_online_node(node) { in reserve_pernode_space()
359 if (node_isset(node, memory_less_mask)) in reserve_pernode_space()
362 /* Now the per-node space */ in reserve_pernode_space()
363 size = mem_data[node].pernode_size; in reserve_pernode_space()
364 base = __pa(mem_data[node].pernode_addr); in reserve_pernode_space()
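reserve_pernode_space() walks the online nodes that actually have memory and marks each node's pernode chunk as reserved in the boot allocator so nothing else can reuse it. A short sketch of that loop, with a hypothetical reserve_range() standing in for the boot-memory reservation call and the virtual-to-physical conversion omitted:

    #include <stdio.h>

    #define MAX_NUMNODES 2

    struct node_mem { unsigned long pernode_addr, pernode_size; int has_memory; };

    static struct node_mem mem_data[MAX_NUMNODES] = {
        { 0x00200000, 0x00100000, 1 },
        { 0x10200000, 0x00100000, 1 },
    };

    /* Hypothetical stand-in for the boot allocator's reservation call. */
    static void reserve_range(unsigned long base, unsigned long size)
    {
        printf("reserving [%#lx, %#lx)\n", base, base + size);
    }

    int main(void)
    {
        for (int node = 0; node < MAX_NUMNODES; node++) {
            if (!mem_data[node].has_memory)      /* skip memory-less nodes */
                continue;
            reserve_range(mem_data[node].pernode_addr,
                          mem_data[node].pernode_size);
        }
        return 0;
    }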
372 int node; in scatter_node_data() local
377 * because we are halfway through initialization of the new node's in scatter_node_data()
378 * structures. If for_each_online_node() is used, a new node's in scatter_node_data()
382 for_each_node(node) { in scatter_node_data()
383 if (pgdat_list[node]) { in scatter_node_data()
384 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; in scatter_node_data()
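scatter_node_data() copies the global pgdat_list[] into every node's local copy of the pg_data_t pointer table; as the comment fragments above note, it deliberately iterates all nodes rather than only online ones, because it can be called while a new node is still being brought up. A minimal sketch of that copy, with stand-in types for the node-local data:

    #include <stdio.h>
    #include <string.h>

    #define MAX_NUMNODES 4

    struct pgdat_stub { int nid; };
    struct local_node_data { struct pgdat_stub *pg_data_ptrs[MAX_NUMNODES]; };

    static struct pgdat_stub *pgdat_list[MAX_NUMNODES];      /* global list     */
    static struct local_node_data *local_data[MAX_NUMNODES]; /* per-node copies */

    static void scatter_node_data_sketch(void)
    {
        for (int node = 0; node < MAX_NUMNODES; node++) {
            if (!pgdat_list[node] || !local_data[node])
                continue;
            /* Give this node its own copy of the global pg_data_t list. */
            memcpy(local_data[node]->pg_data_ptrs, pgdat_list,
                   sizeof(pgdat_list));
        }
    }

    int main(void)
    {
        static struct pgdat_stub n0 = { 0 }, n1 = { 1 };
        static struct local_node_data d0, d1;

        pgdat_list[0] = &n0;  pgdat_list[1] = &n1;
        local_data[0] = &d0;  local_data[1] = &d1;
        scatter_node_data_sketch();
        printf("node 1 sees node 0's pgdat nid = %d\n",
               d1.pg_data_ptrs[0]->nid);
        return 0;
    }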
391 * initialize_pernode_data - fixup per-cpu & per-node pointers
393 * Each node's per-node area has a copy of the global pg_data_t list, so
394 * we copy that to each node here, as well as setting the per-cpu pointer
395 * to the local node data structure.
399 int cpu, node; in initialize_pernode_data() local
406 node = node_cpuid[cpu].nid; in initialize_pernode_data()
408 mem_data[node].node_data; in initialize_pernode_data()
414 node = node_cpuid[cpu].nid; in initialize_pernode_data()
417 cpu0_cpu_info->node_data = mem_data[node].node_data; in initialize_pernode_data()
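initialize_pernode_data() first scatters the pg_data_t pointers, then points each cpu's architecture cpu-info at the node-local data for the node that cpu sits on; the cpu0_cpu_info line above is the cpu0-only variant of the same fixup on the path without the per-cpu loop. A small sketch of the per-cpu fixup, using hypothetical stand-in structs for the cpu info and node data:

    #include <stdio.h>

    #define NR_CPUS      4
    #define MAX_NUMNODES 2

    struct node_data_stub { int nid; };
    struct cpuinfo_stub   { struct node_data_stub *node_data; };

    static struct { int nid; } node_cpuid[NR_CPUS] = { {0}, {0}, {1}, {1} };
    static struct node_data_stub node_local[MAX_NUMNODES] = { {0}, {1} };
    static struct cpuinfo_stub cpu_info[NR_CPUS];  /* stand-in for per-cpu cpuinfo */

    int main(void)
    {
        /* Point every cpu's info struct at the node-local data on its node. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            int node = node_cpuid[cpu].nid;
            cpu_info[cpu].node_data = &node_local[node];
        }
        printf("cpu 3 uses node %d's local data\n", cpu_info[3].node_data->nid);
        return 0;
    }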
424 * node but fall back to any other node when __alloc_bootmem_node fails
426 * @nid: node id
427 * @pernodesize: size of this node's pernode data
433 int bestnode = NUMA_NO_NODE, node, anynode = 0; in memory_less_node_alloc() local
435 for_each_online_node(node) { in memory_less_node_alloc()
436 if (node_isset(node, memory_less_mask)) in memory_less_node_alloc()
438 else if (node_distance(nid, node) < best) { in memory_less_node_alloc()
439 best = node_distance(nid, node); in memory_less_node_alloc()
440 bestnode = node; in memory_less_node_alloc()
442 anynode = node; in memory_less_node_alloc()
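For a node with no memory of its own, the loop above picks the closest node that does have memory (by node_distance) and remembers any usable node as a last resort. A compact, self-contained sketch of that selection with a hypothetical distance table:

    #include <stdio.h>

    #define MAX_NUMNODES 4
    #define NUMA_NO_NODE (-1)

    static int memory_less[MAX_NUMNODES] = { 0, 1, 0, 0 };  /* node 1 has no memory */

    /* Hypothetical symmetric distance table (smaller = closer). */
    static int distance[MAX_NUMNODES][MAX_NUMNODES] = {
        { 10, 20, 40, 60 },
        { 20, 10, 20, 40 },
        { 40, 20, 10, 20 },
        { 60, 40, 20, 10 },
    };

    /* Pick the closest node with memory to host @nid's pernode data. */
    static int best_donor_node(int nid)
    {
        int best = 1 << 30, bestnode = NUMA_NO_NODE, anynode = 0;

        for (int node = 0; node < MAX_NUMNODES; node++) {
            if (memory_less[node])
                continue;                     /* can't donate memory */
            if (distance[nid][node] < best) {
                best = distance[nid][node];
                bestnode = node;
            }
            anynode = node;                   /* last resort */
        }
        return bestnode == NUMA_NO_NODE ? anynode : bestnode;
    }

    int main(void)
    {
        printf("memory-less node 1 borrows from node %d\n", best_donor_node(1));
        return 0;
    }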
468 int node; in memory_less_nodes() local
470 for_each_node_mask(node, memory_less_mask) { in memory_less_nodes()
471 pernodesize = compute_pernodesize(node); in memory_less_nodes()
472 pernode = memory_less_node_alloc(node, pernodesize); in memory_less_nodes()
473 fill_pernode(node, __pa(pernode), pernodesize); in memory_less_nodes()
483 * allocate the per-cpu and per-node structures.
487 int node; in find_memory() local
493 printk(KERN_ERR "node info missing!\n"); in find_memory()
506 for_each_online_node(node) in find_memory()
507 if (mem_data[node].min_pfn) in find_memory()
508 node_clear(node, memory_less_mask); in find_memory()
542 * call_pernode_memory - use SRAT to call callback functions with node info
548 * out to which node a block of memory belongs. Ignore memory that we cannot
568 /* No SRAT table, so assume one node (node 0) */ in call_pernode_memory()
590 * paging_init() sets up the page tables for each node of the system and frees
598 int node; in paging_init() local
613 for_each_online_node(node) { in paging_init()
614 pfn_offset = mem_data[node].min_pfn; in paging_init()
617 NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; in paging_init()
619 if (mem_data[node].max_pfn > max_pfn) in paging_init()
620 max_pfn = mem_data[node].max_pfn; in paging_init()
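With a virtual memmap, a node's node_mem_map does not need its own allocation; as the paging_init() lines above show, it is simply the slice of the shared vmem_map that starts at the node's first pfn, and max_pfn is raised as each node is visited. A small sketch of that pointer arithmetic with a stand-in page struct and made-up pfn ranges:

    #include <stdio.h>

    #define MAX_NUMNODES 2

    struct page_stub { char pad[64]; };           /* stand-in for struct page */

    static struct page_stub vmem_map[1 << 16];    /* stand-in virtual memmap  */
    static struct { unsigned long min_pfn, max_pfn; } mem_data[MAX_NUMNODES] = {
        { 0x0100, 0x4000 }, { 0x8000, 0xc000 },
    };

    int main(void)
    {
        unsigned long max_pfn = 0;

        for (int node = 0; node < MAX_NUMNODES; node++) {
            /* A node's mem_map is just a slice of the shared vmem_map. */
            struct page_stub *node_mem_map = vmem_map + mem_data[node].min_pfn;

            if (mem_data[node].max_pfn > max_pfn)
                max_pfn = mem_data[node].max_pfn;
            printf("node %d: mem_map = vmem_map + %#lx, pfns [%#lx, %#lx)\n",
                   node, (unsigned long)(node_mem_map - vmem_map),
                   mem_data[node].min_pfn, mem_data[node].max_pfn);
        }
        printf("max_pfn = %#lx\n", max_pfn);
        return 0;
    }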
654 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, in vmemmap_populate() argument
657 return vmemmap_populate_basepages(start, end, node, NULL); in vmemmap_populate()