Lines Matching +full:per +full:- +full:cpu

// SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Track per-node information needed to set up the boot memory allocator, the
 * per-node areas, and the real VM.
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.

#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
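The macro first rounds the start address up to a 1MB boundary, then adds a node-dependent stride of one percpu page (wrapping at MAX_NODE_ALIGN_OFFSET), so per-node structures on different nodes never land at cache-aliasing addresses. A minimal user-space sketch of the arithmetic; the 64KB PERCPU_PAGE_SIZE and 32MB MAX_NODE_ALIGN_OFFSET are assumed ia64 values, and the start address is made up:

#include <stdio.h>

/* Assumed values: ia64's 64KB percpu page and this file's 32MB wrap. */
#define PERCPU_PAGE_SIZE	(64UL * 1024)
#define MAX_NODE_ALIGN_OFFSET	(32UL * 1024 * 1024)

#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

int main(void)
{
	unsigned long start = 0x4000123UL;	/* arbitrary start address */

	/* Each node gets the next 1MB boundary plus node * 64KB. */
	for (int node = 0; node < 4; node++)
		printf("node %d -> %#lx\n", node,
		       NODEDATA_ALIGN(start, node));
	return 0;
}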
 * build_node_maps - callback to set up mem_data structs for each node
 * Each block must start on an IA64_GRANULE_SIZE boundary, so we round down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.
 * early_nr_cpus_node - return number of cpus on a given node
 * We can't use nr_cpus_node() yet because acpi_boot_init() hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.

	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
 * compute_pernodesize - compute size of pernode data
 * per_cpu_node_setup - set up per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * set up __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.

	int cpu;

	for_each_possible_early_cpu(cpu) {
		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

		if (node != node_cpuid[cpu].nid)
			continue;

		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;

		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
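The invariant being established here is that __per_cpu_offset[cpu] turns a per-cpu variable's link-time address into the address of that cpu's private copy: the static section is memcpy'd into each cpu's slot and the offset is simply the distance between the two. A user-space sketch of the same copy-then-offset scheme; all names below (percpu_template, per_cpu_offset, my_per_cpu_ptr) are hypothetical stand-ins, not the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the static __per_cpu_start..__per_cpu_end section. */
static struct { int counter; } percpu_template = { .counter = 42 };

#define NR_CPUS 4
static long per_cpu_offset[NR_CPUS];

/* Resolve a per-cpu variable: link-time address plus the cpu's offset. */
#define my_per_cpu_ptr(var, cpu) \
	((typeof(var) *)((char *)&(var) + per_cpu_offset[cpu]))

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* Stand-in for this cpu's slot in the per-node region. */
		void *area = malloc(sizeof(percpu_template));

		memcpy(area, &percpu_template, sizeof(percpu_template));
		per_cpu_offset[cpu] =
			(char *)area - (char *)&percpu_template;
	}

	my_per_cpu_ptr(percpu_template, 2)->counter = 7;
	printf("cpu2=%d cpu0=%d\n",
	       my_per_cpu_ptr(percpu_template, 2)->counter,
	       my_per_cpu_ptr(percpu_template, 0)->counter);
	return 0;
}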
 * setup_per_cpu_areas - set up percpu areas

	unsigned int cpu;

	cpu_map = ai->groups[0].cpu_map;

	/* determine base: the lowest of the percpu area starts */
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

	/* build cpu_map; units are grouped by node */
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	/* walk cpu_map and open a new group at each node boundary */
	ai->nr_groups = 0;
	for (unit = 0; unit < nr_units; unit++) {
		cpu = cpu_map[unit];
		node = node_cpuid[cpu].nid;

		if (node == prev_node) {
			gi->nr_units++;
			continue;
		}
		prev_node = node;

		gi = &ai->groups[ai->nr_groups++];
		gi->nr_units = 1;
		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
		gi->cpu_map = &cpu_map[unit];
	}
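The group construction can be exercised on its own: units are laid out node by node, and a new group opens whenever the walk over cpu_map crosses a node boundary. A self-contained sketch with a hypothetical five-cpu, three-node topology standing in for node_cpuid[]:

#include <stdio.h>

/* Hypothetical topology: cpus 0-1 on node 0, 2-3 on node 1, 4 on node 2. */
static int cpu_nid[] = { 0, 0, 1, 1, 2 };
#define NR_UNITS ((int)(sizeof(cpu_nid) / sizeof(cpu_nid[0])))
#define NR_NODES 3

struct group { int first_unit, nr_units, node; };

int main(void)
{
	int cpu_map[NR_UNITS];
	struct group groups[NR_UNITS];
	int unit = 0, nr_groups = 0, prev_node = -1;

	/* build cpu_map; units are grouped by node */
	for (int node = 0; node < NR_NODES; node++)
		for (int cpu = 0; cpu < NR_UNITS; cpu++)
			if (cpu_nid[cpu] == node)
				cpu_map[unit++] = cpu;

	/* open a new group at each node boundary */
	for (unit = 0; unit < NR_UNITS; unit++) {
		int node = cpu_nid[cpu_map[unit]];

		if (node == prev_node) {
			groups[nr_groups - 1].nr_units++;
			continue;
		}
		prev_node = node;
		groups[nr_groups++] = (struct group){ unit, 1, node };
	}

	for (int i = 0; i < nr_groups; i++)
		printf("group %d: node %d, units %d..%d\n", i,
		       groups[i].node, groups[i].first_unit,
		       groups[i].first_unit + groups[i].nr_units - 1);
	return 0;
}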
 * fill_pernode - initialize pernode data.
 * find_pernode_space - allocate memory for memory map and per-node structures
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of address space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want.
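A sketch of the size computation this layout implies, in the spirit of compute_pernodesize(): the per-cpu pages come first, then the cache-line-aligned per-node structures, and the total is page-aligned. The constants and struct sizes below are assumptions for illustration, not the kernel's actual values:

#include <stdio.h>

#define PERCPU_PAGE_SIZE	(64UL * 1024)	/* assumed ia64 value */
#define L1_CACHE_BYTES		128UL		/* assumed */
#define PAGE_SZ			(16UL * 1024)	/* assumed */

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Rough shape of the layout in the diagram above. */
static unsigned long compute_pernodesize(int node, int cpus_on_node,
					 unsigned long sizeof_pgdat,
					 unsigned long sizeof_node_data)
{
	unsigned long size = 0;

	size += PERCPU_PAGE_SIZE * cpus_on_node;
	size += node * L1_CACHE_BYTES;		/* per-node stagger */
	size += ALIGN_UP(sizeof_pgdat, L1_CACHE_BYTES);
	size += ALIGN_UP(sizeof_node_data, L1_CACHE_BYTES);
	return ALIGN_UP(size, PAGE_SZ);
}

int main(void)
{
	/* e.g. node 1 with 4 cpus and made-up struct sizes */
	printf("pernodesize = %lu bytes\n",
	       compute_pernodesize(1, 4, 3000, 200));
	return 0;
}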
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.

	/* Now the per-node space */
	/*
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures, so we use for_each_node() instead.
	 */
	dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.

	int cpu, node;

	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}

	cpu = 0;
	node = node_cpuid[cpu].nid;
	cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
		((char *)&ia64_cpu_info - __per_cpu_start));
	cpu0_cpu_info->node_data = mem_data[node].node_data;
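A user-space sketch of the replication this depends on: the global pg_data_t list is copied into every node's local area, so any cpu can resolve any node's pg_data through its own node-local copy. The types and array shapes are hypothetical stand-ins:

#include <stdio.h>
#include <string.h>

#define MAX_NUMNODES 4

struct pg_data { int nid; };

static struct pg_data nodes[MAX_NUMNODES];
/* Global list, and one local copy of it per node (the pg_data_ptrs
 * array that lives in each node's pernode area). */
static struct pg_data *pgdat_list[MAX_NUMNODES];
static struct pg_data *pg_data_ptrs[MAX_NUMNODES][MAX_NUMNODES];

static void scatter_node_data(void)
{
	for (int node = 0; node < MAX_NUMNODES; node++)
		memcpy(pg_data_ptrs[node], pgdat_list, sizeof(pgdat_list));
}

int main(void)
{
	for (int n = 0; n < MAX_NUMNODES; n++) {
		nodes[n].nid = n;
		pgdat_list[n] = &nodes[n];
	}
	scatter_node_data();

	/* Node 2's local copy resolves node 3's pg_data directly. */
	printf("node 2 sees pgdat for nid %d\n", pg_data_ptrs[2][3]->nid);
	return 0;
}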
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 * node, but fall back to any other node when that allocation fails

 * memory_less_nodes - allocate and initialize per-node data for CPU-only nodes
 * find_memory - walk the EFI memory map and set up the bootmem allocator
 *
 * Called early in boot to set up the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.

	min_low_pfn = -1;
 * per_cpu_init - set up per-cpu variables

	int cpu;

	for_each_possible_early_cpu(cpu)
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 * call_pernode_memory - use SRAT to call callback functions with node info

	/* No SRAT table: hand the whole range to the callback as node 0 */
	(*func)(start, end - start, 0);

	/* Otherwise report each memblk intersection, tagged with its node */
	(*func)(rs, re - rs, node_memblk[i].nid);
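These two callback invocations are the two ends of a clipping walk: with no SRAT data the whole range is handed over as node 0; otherwise [start, end) is intersected with each recorded memblk and every non-empty piece is reported with its node. A self-contained sketch of that walk, with a made-up two-node memblk table:

#include <stdio.h>

/* Hypothetical SRAT-style table: [start, start+size) belongs to nid. */
struct memblk { unsigned long start, size; int nid; };
static struct memblk node_memblk[] = {
	{ 0x0000000UL, 0x4000000UL, 0 },
	{ 0x4000000UL, 0x4000000UL, 1 },
};
#define NUM_MEMBLKS ((int)(sizeof(node_memblk) / sizeof(node_memblk[0])))

typedef void (*pernode_fn)(unsigned long start, unsigned long len, int nid);

static void call_pernode_memory(unsigned long start, unsigned long end,
				pernode_fn func)
{
	for (int i = 0; i < NUM_MEMBLKS; i++) {
		unsigned long bs = node_memblk[i].start;
		unsigned long be = bs + node_memblk[i].size;
		unsigned long rs = start > bs ? start : bs;
		unsigned long re = end < be ? end : be;

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);
		if (re == end)
			break;
	}
}

static void show(unsigned long start, unsigned long len, int nid)
{
	printf("range %#lx + %#lx on node %d\n", start, len, nid);
}

int main(void)
{
	/* A range straddling both memblks is split at the boundary. */
	call_pernode_memory(0x3f00000UL, 0x4100000UL, show);
	return 0;
}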
 * paging_init - set up page tables