Lines Matching full:node
34 * Track per-node information needed to set up the boot memory allocator, the
35 * per-node areas, and the real VM.
51 * To prevent cache aliasing effects, align per-node structures so that they
52 * start at addresses that are strided by node number.
55 #define NODEDATA_ALIGN(addr, node) \ argument
57 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
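
Only the stride term of NODEDATA_ALIGN() shows up in the matches (the preceding source line, which aligns the base address, does not contain the search word). A minimal userspace sketch of how that term spreads successive nodes to different offsets; PERCPU_PAGE_SIZE and MAX_NODE_ALIGN_OFFSET below are stand-in values, not the real ia64 constants:

#include <stdio.h>

/* Stand-in values for illustration; the real ia64 constants differ. */
#define PERCPU_PAGE_SIZE (64UL * 1024)
#define MAX_NODE_ALIGN_OFFSET (32UL * 1024 * 1024)

int main(void)
{
    for (int node = 0; node < 4; node++) {
        /* The node-dependent stride that gets added to the aligned base. */
        unsigned long off = ((unsigned long)node * PERCPU_PAGE_SIZE) &
                            (MAX_NODE_ALIGN_OFFSET - 1);
        printf("node %d: stride offset %#lx\n", node, off);
    }
    return 0;
}
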
60 * build_node_maps - callback to set up mem_data structs for each node
63 * @node: node where this range resides
66 * treat as a virtually contiguous block (i.e. each node). Each such block
72 int node) in build_node_maps() argument
79 if (!mem_data[node].min_pfn) { in build_node_maps()
80 mem_data[node].min_pfn = spfn; in build_node_maps()
81 mem_data[node].max_pfn = epfn; in build_node_maps()
83 mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn); in build_node_maps()
84 mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn); in build_node_maps()
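
Only the matching lines of the body appear above; a self-contained sketch of the same min/max-pfn bookkeeping, with mem_data[] reduced to a stand-in struct and MAX_NUMNODES shrunk for illustration:

#include <stdio.h>

#define MAX_NUMNODES 4 /* stand-in; the kernel value is larger */

static struct { unsigned long min_pfn, max_pfn; } mem_data[MAX_NUMNODES];

/* Widen the node's [min_pfn, max_pfn) span so it covers another range. */
static void build_node_map(unsigned long spfn, unsigned long epfn, int node)
{
    if (!mem_data[node].min_pfn) {
        mem_data[node].min_pfn = spfn;
        mem_data[node].max_pfn = epfn;
    } else {
        if (spfn < mem_data[node].min_pfn)
            mem_data[node].min_pfn = spfn;
        if (epfn > mem_data[node].max_pfn)
            mem_data[node].max_pfn = epfn;
    }
}

int main(void)
{
    build_node_map(0x100, 0x200, 0);
    build_node_map(0x080, 0x180, 0);
    printf("node 0 spans pfn %#lx-%#lx\n",
           mem_data[0].min_pfn, mem_data[0].max_pfn);
    return 0;
}
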
91 * early_nr_cpus_node - return number of cpus on a given node
92 * @node: node to check
94 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
96 * called yet. Note that node 0 will also count all non-existent cpus.
98 static int early_nr_cpus_node(int node) in early_nr_cpus_node() argument
103 if (node == node_cpuid[cpu].nid) in early_nr_cpus_node()
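
The counting loop is simple enough to model directly; in this sketch node_cpuid[] and NR_CPUS are small stand-ins (the kernel fills the real table from firmware before this runs):

#include <stdio.h>

#define NR_CPUS 8 /* stand-in for the kernel constant */

static struct { int nid; } node_cpuid[NR_CPUS] = {
    { 0 }, { 0 }, { 1 }, { 1 }, /* cpus 4..7 left at nid 0, i.e. "absent" */
};

/* Count cpus whose recorded nid matches @node; node 0 also absorbs the
 * never-populated entries, which is the behaviour the comment above notes. */
static int early_nr_cpus_node(int node)
{
    int cpu, n = 0;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (node == node_cpuid[cpu].nid)
            n++;
    return n;
}

int main(void)
{
    printf("node 0: %d cpus, node 1: %d cpus\n",
           early_nr_cpus_node(0), early_nr_cpus_node(1));
    return 0;
}
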
111 * @node: the node id.
113 static unsigned long compute_pernodesize(int node) in compute_pernodesize() argument
117 cpus = early_nr_cpus_node(node); in compute_pernodesize()
119 pernodesize += node * L1_CACHE_BYTES; in compute_pernodesize()
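
Only two terms of the size computation match the search. A hedged model of the kind of sum it performs; the constants, the two-cpu early_nr_cpus_node() stub and the extra structure allowance are illustrative assumptions, not the real values:

#include <stdio.h>

/* Stand-in constants; real ia64 values differ. */
#define PERCPU_PAGE_SIZE (64UL * 1024)
#define L1_CACHE_BYTES 128UL
#define PAGE_SIZE (16UL * 1024)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int early_nr_cpus_node(int node) /* assume 2 cpus per node */
{
    (void)node;
    return 2;
}

/* Hedged model: per-cpu pages for the node's cpus, a node-indexed cache-line
 * stagger, room for node-local structures, all rounded up to a page. */
static unsigned long compute_pernodesize(int node)
{
    unsigned long size = 0, cpus = early_nr_cpus_node(node);

    size += cpus * PERCPU_PAGE_SIZE;
    size += node * L1_CACHE_BYTES; /* matches the fragment above */
    size += 4096;                  /* placeholder for pg_data_t etc. */
    return PAGE_ALIGN(size);
}

int main(void)
{
    printf("node 1 pernodesize: %lu bytes\n", compute_pernodesize(1));
    return 0;
}
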
128 * per_cpu_node_setup - set up per-cpu areas on each node
129 * @cpu_data: per-cpu area on this node
130 * @node: node to setup
133 * set up __per_cpu_offset for each CPU on this node. Return a pointer to
136 static void *per_cpu_node_setup(void *cpu_data, int node) in per_cpu_node_setup() argument
144 if (node != node_cpuid[cpu].nid) in per_cpu_node_setup()
155 * area for cpu0 is on the correct node and its in per_cpu_node_setup()
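
The matches show only the node filter; a hedged model of the slicing per_cpu_node_setup() does with the node's cpu_data region (the real routine also copies the static per-cpu template into each slice and records the result in __per_cpu_offset[]; sizes and tables below are stand-ins):

#include <stdio.h>

#define NR_CPUS 4 /* stand-ins for illustration */
#define PERCPU_PAGE_SIZE (64UL * 1024)

static struct { int nid; } node_cpuid[NR_CPUS] = { {0}, {1}, {0}, {1} };
static unsigned long per_cpu_offset[NR_CPUS];

/* Hand each cpu that lives on @node one PERCPU_PAGE_SIZE slice of the node's
 * cpu_data region, remember where it ended up, and return the next free byte. */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
    char *p = cpu_data;

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        if (node != node_cpuid[cpu].nid)
            continue;
        per_cpu_offset[cpu] = (unsigned long)p;
        p += PERCPU_PAGE_SIZE;
    }
    return p;
}

int main(void)
{
    static char area[2 * PERCPU_PAGE_SIZE];
    void *end = per_cpu_node_setup(area, 0);

    printf("node 0 consumed %ld bytes\n", (long)((char *)end - area));
    return 0;
}
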
189 int node, prev_node, unit, nr_units; in setup_per_cpu_areas() local
203 /* build cpu_map, units are grouped by node */ in setup_per_cpu_areas()
205 for_each_node(node) in setup_per_cpu_areas()
207 if (node == node_cpuid[cpu].nid) in setup_per_cpu_areas()
227 * CPUs are put into groups according to node. Walk cpu_map in setup_per_cpu_areas()
228 * and create new groups at node boundaries. in setup_per_cpu_areas()
234 node = node_cpuid[cpu].nid; in setup_per_cpu_areas()
236 if (node == prev_node) { in setup_per_cpu_areas()
240 prev_node = node; in setup_per_cpu_areas()
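
The two matched loops are the interesting part of setup_per_cpu_areas(): build a cpu_map ordered by node, then start a new group whenever the node changes. A self-contained sketch with a made-up node_cpuid[] table; the real function hands these groups to the percpu allocator, which is omitted here:

#include <stdio.h>

#define NR_CPUS 6 /* stand-in sizes for illustration */
#define MAX_NUMNODES 3

static struct { int nid; } node_cpuid[NR_CPUS] = {
    {0}, {1}, {0}, {2}, {1}, {2},
};

int main(void)
{
    int cpu_map[NR_CPUS], unit = 0;
    int node, cpu, prev_node = -1, nr_groups = 0;

    /* build cpu_map, units are grouped by node */
    for (node = 0; node < MAX_NUMNODES; node++)
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (node == node_cpuid[cpu].nid)
                cpu_map[unit++] = cpu;

    /* walk cpu_map and open a new group at every node boundary */
    for (int i = 0; i < unit; i++) {
        node = node_cpuid[cpu_map[i]].nid;
        if (node != prev_node) {
            nr_groups++;
            prev_node = node;
        }
        printf("unit %d: cpu %d (node %d, group %d)\n",
               i, cpu_map[i], node, nr_groups - 1);
    }
    return 0;
}
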
255 * @node: the node id.
259 static void __init fill_pernode(int node, unsigned long pernode, in fill_pernode() argument
263 int cpus = early_nr_cpus_node(node); in fill_pernode()
265 mem_data[node].pernode_addr = pernode; in fill_pernode()
266 mem_data[node].pernode_size = pernodesize; in fill_pernode()
271 pernode += node * L1_CACHE_BYTES; in fill_pernode()
273 pgdat_list[node] = __va(pernode); in fill_pernode()
276 mem_data[node].node_data = __va(pernode); in fill_pernode()
280 cpu_data = per_cpu_node_setup(cpu_data, node); in fill_pernode()
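
The matches show where the pg_data_t and node_data land but not the increments between them. A hedged model of the carve-out; the slot sizes and the identity mapping standing in for __va() are assumptions:

#include <stdio.h>

/* Stand-in sizes; the real carve-out uses L1_CACHE_ALIGN'd structure sizes. */
#define L1_CACHE_BYTES 128UL
#define PGDAT_SIZE 1024UL
#define NODE_DATA_SIZE 256UL

static unsigned long pgdat_addr[4], node_data_addr[4];

/* Hedged model of the carve-out: stagger by node, then hand out the pg_data_t
 * slot and the node_data slot from the front of the per-node region. */
static unsigned long fill_pernode(int node, unsigned long pernode)
{
    pernode += node * L1_CACHE_BYTES; /* cache-colour stagger */

    pgdat_addr[node] = pernode;       /* stands in for pgdat_list[node] */
    pernode += PGDAT_SIZE;

    node_data_addr[node] = pernode;   /* stands in for mem_data[node].node_data */
    pernode += NODE_DATA_SIZE;

    return pernode;                   /* cpu areas would be carved next */
}

int main(void)
{
    unsigned long next = fill_pernode(1, 0x100000);

    printf("pgdat @ %#lx, node_data @ %#lx, cpu areas from %#lx\n",
           pgdat_addr[1], node_data_addr[1], next);
    return 0;
}
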
286 * find_pernode_space - allocate memory for memory map and per-node structures
289 * @node: node where this range resides
292 * pg_data_ts and the per-node data struct. Each node will have something like
297 * |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
299 * | cpus_on_this_node | Node 0 will also have entries for all non-existent cpus.
314 int node) in find_pernode_space() argument
323 * Make sure this memory falls within this node's usable memory in find_pernode_space()
326 if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn) in find_pernode_space()
329 /* Don't setup this node's local space twice... */ in find_pernode_space()
330 if (mem_data[node].pernode_addr) in find_pernode_space()
337 pernodesize = compute_pernodesize(node); in find_pernode_space()
338 pernode = NODEDATA_ALIGN(start, node); in find_pernode_space()
342 fill_pernode(node, pernode, pernodesize); in find_pernode_space()
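
A hedged model of the decision logic in find_pernode_space(): the pfn conversion, the helper stubs and the sizes are simplified assumptions, but the range check, the "already set up" check and the strided placement mirror the matched lines:

#include <stdio.h>

static struct {
    unsigned long min_pfn, max_pfn, pernode_addr, pernode_size;
} mem_data[4];

/* Stand-ins for the helpers matched earlier in this listing. */
static unsigned long compute_pernodesize(int node) { (void)node; return 0x4000; }
static unsigned long nodedata_align(unsigned long a, int n) { return a + n * 0x100; }

/* Accept a candidate range only if it lies inside the node's span and the
 * node has not been set up yet, then place the per-node area at the
 * node-strided alignment if it fits in the range. */
static void find_pernode_space(unsigned long start, unsigned long len, int node)
{
    unsigned long spfn = start >> 14, epfn = (start + len) >> 14;
    unsigned long pernode, pernodesize;

    if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
        return;
    if (mem_data[node].pernode_addr) /* don't set it up twice */
        return;

    pernodesize = compute_pernodesize(node);
    pernode = nodedata_align(start, node);
    if (pernode + pernodesize <= start + len) {
        mem_data[node].pernode_addr = pernode;
        mem_data[node].pernode_size = pernodesize;
    }
}

int main(void)
{
    mem_data[1].min_pfn = 0x0;
    mem_data[1].max_pfn = 0x1000;
    find_pernode_space(0x200000, 0x400000, 1);
    printf("node 1 pernode @ %#lx (%#lx bytes)\n",
           mem_data[1].pernode_addr, mem_data[1].pernode_size);
    return 0;
}
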
348 * reserve_pernode_space - reserve memory for per-node space
350 * Reserve the space used by the bootmem maps & per-node space in the boot
357 int node; in reserve_pernode_space() local
359 for_each_online_node(node) { in reserve_pernode_space()
360 if (node_isset(node, memory_less_mask)) in reserve_pernode_space()
363 /* Now the per-node space */ in reserve_pernode_space()
364 size = mem_data[node].pernode_size; in reserve_pernode_space()
365 base = __pa(mem_data[node].pernode_addr); in reserve_pernode_space()
373 int node; in scatter_node_data() local
378 * because we are halfway through initialization of the new node's in scatter_node_data()
379 * structures. If for_each_online_node() is used, a new node's in scatter_node_data()
383 for_each_node(node) { in scatter_node_data()
384 if (pgdat_list[node]) { in scatter_node_data()
385 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; in scatter_node_data()
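
A self-contained sketch of what scatter_node_data() copies: the global pgdat_list[] table is duplicated into every node that already has a per-node area, walking all nodes rather than only the online ones for the reason the comment gives. Types and sizes are reduced stand-ins:

#include <string.h>
#include <stdio.h>

#define MAX_NUMNODES 3 /* stand-in */

struct pglist_data { int nid; }; /* reduced stand-in for pg_data_t */

/* Each node keeps a node-local copy of the global pg_data_t pointer table. */
struct node_local { struct pglist_data *pg_data_ptrs[MAX_NUMNODES]; };

static struct pglist_data *pgdat_list[MAX_NUMNODES];
static struct node_local local_data[MAX_NUMNODES];

static void scatter_node_data(void)
{
    for (int node = 0; node < MAX_NUMNODES; node++) {
        if (pgdat_list[node]) {
            struct pglist_data **dst = local_data[node].pg_data_ptrs;

            memcpy(dst, pgdat_list, sizeof(pgdat_list));
        }
    }
}

int main(void)
{
    static struct pglist_data n0 = { 0 }, n1 = { 1 };

    pgdat_list[0] = &n0;
    pgdat_list[1] = &n1;
    scatter_node_data();
    printf("node 0's local view of node 1: nid %d\n",
           local_data[0].pg_data_ptrs[1]->nid);
    return 0;
}
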
392 * initialize_pernode_data - fixup per-cpu & per-node pointers
394 * Each node's per-node area has a copy of the global pg_data_t list, so
395 * we copy that to each node here, as well as setting the per-cpu pointer
396 * to the local node data structure.
400 int cpu, node; in initialize_pernode_data() local
407 node = node_cpuid[cpu].nid; in initialize_pernode_data()
409 mem_data[node].node_data; in initialize_pernode_data()
415 node = node_cpuid[cpu].nid; in initialize_pernode_data()
418 cpu0_cpu_info->node_data = mem_data[node].node_data; in initialize_pernode_data()
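
A reduced sketch of the per-cpu fix-up: every cpu's node_data pointer is aimed at its home node's copy. The per-cpu variable is modelled as a plain array and the cpu0 special case is left out; both are simplifications:

#include <stdio.h>

#define NR_CPUS 4 /* stand-ins for illustration */
#define MAX_NUMNODES 2

struct node_data_model { int node; }; /* reduced stand-in */

static struct { int nid; } node_cpuid[NR_CPUS] = { {0}, {0}, {1}, {1} };
static struct { struct node_data_model *node_data; } mem_data[MAX_NUMNODES];
static struct node_data_model *cpu_node_data[NR_CPUS];

/* Point every cpu's node_data pointer at its home node's per-node copy. */
static void initialize_pernode_data(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        int node = node_cpuid[cpu].nid;

        cpu_node_data[cpu] = mem_data[node].node_data;
    }
}

int main(void)
{
    static struct node_data_model nd[MAX_NUMNODES] = { {0}, {1} };

    mem_data[0].node_data = &nd[0];
    mem_data[1].node_data = &nd[1];
    initialize_pernode_data();
    printf("cpu 3 uses node %d's data\n", cpu_node_data[3]->node);
    return 0;
}
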
425 * node but fall back to any other node when __alloc_bootmem_node fails
427 * @nid: node id
428 * @pernodesize: size of this node's pernode data
434 int bestnode = NUMA_NO_NODE, node, anynode = 0; in memory_less_node_alloc() local
436 for_each_online_node(node) { in memory_less_node_alloc()
437 if (node_isset(node, memory_less_mask)) in memory_less_node_alloc()
439 else if (node_distance(nid, node) < best) { in memory_less_node_alloc()
440 best = node_distance(nid, node); in memory_less_node_alloc()
441 bestnode = node; in memory_less_node_alloc()
443 anynode = node; in memory_less_node_alloc()
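
A self-contained sketch of the closest-node selection, with node_distance() backed by a made-up distance table; in the kernel the distances come from the SLIT and the candidate set is the online, non-memory-less nodes:

#include <stdio.h>

#define MAX_NUMNODES 4 /* stand-in sizes */
#define NUMA_NO_NODE (-1)

static int memory_less[MAX_NUMNODES] = { 0, 1, 0, 0 }; /* node 1 has no memory */

/* Stand-in distance table; the kernel gets this from node_distance()/SLIT. */
static int node_distance(int a, int b)
{
    static const int d[MAX_NUMNODES][MAX_NUMNODES] = {
        { 10, 20, 30, 40 },
        { 20, 10, 20, 30 },
        { 30, 20, 10, 20 },
        { 40, 30, 20, 10 },
    };
    return d[a][b];
}

/* Prefer the closest node that actually has memory; keep any candidate as a
 * fallback in case no better node is found. */
static int pick_node_for(int nid)
{
    int best = 99, bestnode = NUMA_NO_NODE, anynode = 0;

    for (int node = 0; node < MAX_NUMNODES; node++) {
        if (memory_less[node])
            continue;
        else if (node_distance(nid, node) < best) {
            best = node_distance(nid, node);
            bestnode = node;
        }
        anynode = node;
    }
    return bestnode == NUMA_NO_NODE ? anynode : bestnode;
}

int main(void)
{
    printf("memory-less node 1 allocates from node %d\n", pick_node_for(1));
    return 0;
}
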
469 int node; in memory_less_nodes() local
471 for_each_node_mask(node, memory_less_mask) { in memory_less_nodes()
472 pernodesize = compute_pernodesize(node); in memory_less_nodes()
473 pernode = memory_less_node_alloc(node, pernodesize); in memory_less_nodes()
474 fill_pernode(node, __pa(pernode), pernodesize); in memory_less_nodes()
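
A compact model of the loop: every node still in memory_less_mask gets its per-node area from a neighbour. The helpers are trivial stubs standing in for the routines matched earlier in this listing:

#include <stdio.h>

#define MAX_NUMNODES 3 /* stand-in sizes */

static int memory_less[MAX_NUMNODES] = { 0, 1, 0 }; /* node 1 has no memory */

/* Trivial stubs standing in for the helpers shown earlier in this file. */
static unsigned long compute_pernodesize(int node) { (void)node; return 0x4000; }
static unsigned long memory_less_node_alloc(int node, unsigned long sz)
{
    (void)node; (void)sz;
    return 0x900000; /* pretend this came from a nearby node */
}
static void fill_pernode(int node, unsigned long pa, unsigned long sz)
{
    printf("node %d: pernode at %#lx, %#lx bytes\n", node, pa, sz);
}

int main(void)
{
    /* Give every memory-less node a per-node area borrowed from a
     * neighbour, mirroring the loop matched above. */
    for (int node = 0; node < MAX_NUMNODES; node++) {
        if (!memory_less[node])
            continue;
        unsigned long sz = compute_pernodesize(node);
        unsigned long va = memory_less_node_alloc(node, sz);
        fill_pernode(node, va /* __pa() in the kernel */, sz);
    }
    return 0;
}
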
484 * allocate the per-cpu and per-node structures.
488 int node; in find_memory() local
494 printk(KERN_ERR "node info missing!\n"); in find_memory()
507 for_each_online_node(node) in find_memory()
508 if (mem_data[node].min_pfn) in find_memory()
509 node_clear(node, memory_less_mask); in find_memory()
543 * call_pernode_memory - use SRAT to call callback functions with node info
549 * out to which node a block of memory belongs. Ignore memory that we cannot
569 /* No SRAT table, so assume one node (node 0) */ in call_pernode_memory()
591 * paging_init() sets up the page tables for each node of the system and frees
632 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, in vmemmap_populate() argument
635 return vmemmap_populate_basepages(start, end, node, NULL); in vmemmap_populate()