Lines matching +full:per +full:-cpu in arch/ia64/mm/discontig.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ...
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * ...
 */
/*
 * Track per-node information needed to set up the boot memory allocator, the
 * per-node areas, and the real VM.
 */
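/*
 * For context, a hedged sketch of the tracking struct this comment
 * introduces in arch/ia64/mm/discontig.c (not among the matched lines;
 * the exact fields vary across kernel versions):
 */
struct early_node_data {
        struct ia64_node_data *node_data;       /* node-local data pointer */
        unsigned long pernode_addr;             /* phys base of pernode area */
        unsigned long pernode_size;             /* size of pernode area */
        unsigned long min_pfn;                  /* lowest pfn on this node */
        unsigned long max_pfn;                  /* highest pfn on this node */
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;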
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node)                                      \
        ((((addr) + 1024*1024-1) & ~(1024*1024-1)) +                    \
         (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
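/*
 * A standalone, user-space demo of the arithmetic above (assumes ia64's
 * 64KB PERCPU_PAGE_SIZE; the input address is made up for illustration):
 */
#include <stdio.h>

#define PERCPU_PAGE_SIZE        (64UL * 1024)
#define MAX_NODE_ALIGN_OFFSET   (32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)                                      \
        ((((addr) + 1024*1024-1) & ~(1024*1024-1)) +                    \
         (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

int main(void)
{
        /* round 0x12345 up to the next 1MB, then stride by node number */
        printf("%#lx\n", NODEDATA_ALIGN(0x12345UL, 0)); /* 0x100000 */
        printf("%#lx\n", NODEDATA_ALIGN(0x12345UL, 3)); /* 0x130000 */
        return 0;
}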
/**
 * build_node_maps - callback to set up mem_data structs for each node
 *
 * Each block must start on an IA64_GRANULE_SIZE boundary, so the start
 * address is rounded down if necessary.  Any non-existent pages will simply
 * be part of the virtual memmap.
 */
/**
 * early_nr_cpus_node - return number of cpus on a given node
 *
 * nr_cpus_node() can't be used this early, because acpi_boot_init() hasn't
 * been called yet.  Note that node 0 will also count all non-existent cpus.
 */
        int cpu, n = 0;

        for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;
/**
 * compute_pernodesize - compute size of pernode data
 */
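/*
 * Hedged sketch of compute_pernodesize()'s body (condensed from upstream;
 * the exact terms differ between kernel versions):
 */
static unsigned long compute_pernodesize(int node)
{
        unsigned long pernodesize = 0, cpus = early_nr_cpus_node(node);

        pernodesize += PERCPU_PAGE_SIZE * cpus;         /* per-cpu pages */
        pernodesize += node * L1_CACHE_BYTES;           /* anti-alias stride */
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
        return PAGE_ALIGN(pernodesize);
}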
/**
 * per_cpu_node_setup - set up per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * set up __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
        int cpu;

        for_each_possible_early_cpu(cpu) {
                void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

                if (node != node_cpuid[cpu].nid)
                        continue;

                memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
                __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
                        __per_cpu_start;

                /* cpu0's area moves off the __init copy; update ar.k3 too */
                if (cpu == 0)
                        ia64_set_kr(IA64_KR_PER_CPU_DATA,
                                    (unsigned long)cpu_data -
                                    (unsigned long)__per_cpu_start);

                cpu_data += PERCPU_PAGE_SIZE;   /* next cpu's page */
        }
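/*
 * A standalone, user-space model (hypothetical names, plain C) of the
 * offset scheme above: each cpu gets a private copy of a static template,
 * and a recorded byte offset turns a template address into that cpu's copy.
 */
#include <stdio.h>
#include <string.h>

static long template_area[4] = { 1, 2, 3, 4 }; /* plays __per_cpu_start.. */
static long copies[2][4];                       /* one "per-cpu page" per cpu */
static long per_cpu_off[2];                     /* plays __per_cpu_offset[] */

#define per_cpu_addr(p, cpu) ((long *)((char *)(p) + per_cpu_off[cpu]))

int main(void)
{
        for (int cpu = 0; cpu < 2; cpu++) {
                memcpy(copies[cpu], template_area, sizeof(template_area));
                per_cpu_off[cpu] = (char *)copies[cpu] - (char *)template_area;
        }

        /* write through cpu 1's copy; template and cpu 0 stay untouched */
        *per_cpu_addr(&template_area[2], 1) = 42;
        printf("%ld %ld %ld\n", template_area[2], copies[0][2], copies[1][2]);
        /* prints: 3 3 42 */
        return 0;
}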
/**
 * setup_per_cpu_areas - set up percpu areas
 *
 * The per-cpu areas already exist at this point; this function only
 * teaches their layout to the dynamic percpu allocator.
 */
void __init setup_per_cpu_areas(void)
{
        struct pcpu_alloc_info *ai;
        struct pcpu_group_info *gi;
        unsigned int *cpu_map;
        unsigned int cpu;
        ...
        cpu_map = ai->groups[0].cpu_map;

        /* find the lowest per-cpu base and its offset from __per_cpu_start */
        for_each_possible_cpu(cpu)
                base = min(base,
                           (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
        base_offset = (void *)__per_cpu_start - base;

        /* build cpu_map; units are grouped by node */
        for_each_node(node)
                for_each_possible_cpu(cpu)
                        if (node == node_cpuid[cpu].nid)
                                cpu_map[unit++] = cpu;

        /* the static and reserved areas leave the rest for dynamic use */
        static_size = __per_cpu_end - __per_cpu_start;
        reserved_size = PERCPU_MODULE_RESERVE;
        dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;

        ai->static_size         = static_size;
        ai->reserved_size       = reserved_size;
        ai->dyn_size            = dyn_size;
        ai->unit_size           = PERCPU_PAGE_SIZE;
        ai->atom_size           = PAGE_SIZE;
        ai->alloc_size          = PERCPU_PAGE_SIZE;

        /* walk cpu_map and open a new group at each node boundary */
        ai->nr_groups = 0;
        for (unit = 0; unit < nr_units; unit++) {
                cpu = cpu_map[unit];
                node = node_cpuid[cpu].nid;

                if (node == prev_node) {
                        gi->nr_units++;
                        continue;
                }
                prev_node = node;

                gi = &ai->groups[ai->nr_groups++];
                gi->nr_units    = 1;
                gi->base_offset = __per_cpu_offset[cpu] + base_offset;
                gi->cpu_map     = &cpu_map[unit];
        }
        ...
}
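/*
 * A standalone, user-space model (made-up topology) of the two passes
 * above: cpus are listed node by node into cpu_map, then each contiguous
 * run sharing a node becomes one group.
 */
#include <stdio.h>

int main(void)
{
        int nid[] = { 0, 1, 0, 1 };     /* node of each cpu (hypothetical) */
        int ncpus = 4, nnodes = 2;
        int cpu_map[4], unit = 0;

        for (int node = 0; node < nnodes; node++)
                for (int cpu = 0; cpu < ncpus; cpu++)
                        if (nid[cpu] == node)
                                cpu_map[unit++] = cpu;

        int prev_node = -1, groups = 0;
        for (int u = 0; u < unit; u++) {
                if (nid[cpu_map[u]] == prev_node)
                        continue;       /* same group keeps growing */
                prev_node = nid[cpu_map[u]];
                printf("group %d starts at unit %d (node %d)\n",
                       groups++, u, prev_node);
        }
        /* cpu_map becomes 0 2 1 3; groups open at units 0 and 2 */
        return 0;
}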
/**
 * fill_pernode - initialize pernode data.
 */
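/*
 * Hedged sketch of fill_pernode()'s body (reconstructed from upstream;
 * details vary by kernel version): carve the reserved region into per-cpu
 * pages, the node's pg_data_t, and its ia64_node_data, in that order.
 */
static void __init fill_pernode(int node, unsigned long pernode,
                                unsigned long pernodesize)
{
        void *cpu_data;
        int cpus = early_nr_cpus_node(node);

        mem_data[node].pernode_addr = pernode;
        mem_data[node].pernode_size = pernodesize;
        memset(__va(pernode), 0, pernodesize);

        cpu_data = (void *)pernode;
        pernode += PERCPU_PAGE_SIZE * cpus;     /* per-cpu areas first */
        pernode += node * L1_CACHE_BYTES;       /* anti-aliasing stride */

        pgdat_list[node] = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

        mem_data[node].node_data = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

        per_cpu_node_setup(cpu_data, node);
}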
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node gets something like
 * the following in the first chunk of address space large enough to hold it:
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   |     (node 0 also gets entries for all
 *   |------------------------|      non-existent cpus)
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function, but doing it here is straightforward and gets
 * the alignments we want.
 */
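/*
 * Hedged sketch of find_pernode_space()'s central check (condensed from
 * upstream; the sanity checks and error paths are elided):
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
                                     int node)
{
        unsigned long pernode, pernodesize;

        pernodesize = compute_pernodesize(node);
        pernode = NODEDATA_ALIGN(start, node);

        /* claim this range only if the aligned pernode area fits inside it */
        if (!mem_data[node].pernode_addr &&
            start + len > pernode + pernodesize)
                fill_pernode(node, pernode, pernodesize);

        return 0;
}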
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when the real mem maps are created we don't use their
 * memory.
 */
        ...
        /* Now the per-node space */
        /*
         * for_each_online_node() can't be used here: node_online_map is not
         * yet set for hot-added nodes at this point, so pgdat_list[] is
         * checked instead.
         */
        ...
                        dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
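/*
 * Hedged reconstruction of scatter_node_data() from upstream: copy the
 * global pgdat_list into every existing node's local pg_data_ptrs array.
 */
static void scatter_node_data(void)
{
        pg_data_t **dst;
        int node;

        for_each_node(node) {
                if (pgdat_list[node]) {
                        dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
                        memcpy(dst, pgdat_list, sizeof(pgdat_list));
                }
        }
}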
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.
 */
        int cpu, node;
        ...
#ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
        for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
                per_cpu(ia64_cpu_info, cpu).node_data =
                        mem_data[node].node_data;
        }
#else
        /* UP: only cpu0's boot-time area needs the fixup */
        cpu = 0;
        node = node_cpuid[cpu].nid;
        cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
                ((char *)&ia64_cpu_info - __per_cpu_start));
        cpu0_cpu_info->node_data = mem_data[node].node_data;
#endif
/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 * node, falling back to any other node when that allocation fails.
 */
/**
 * memory_less_nodes - allocate and initialize pernode data for CPU-only
 * (memoryless) nodes.
 */
/**
 * find_memory - walk the EFI memory map and set up the bootmem allocator
 *
 * Called early in boot to set up the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
        ...
        min_low_pfn = -1;       /* ULONG_MAX sentinel, lowered per range */
/**
 * per_cpu_init - set up per-cpu variables
 *
 * find_pernode_space() already did most of the work; all that is left is
 * to record each cpu's offset in local_per_cpu_offset.
 */
        int cpu;
        ...
        for_each_possible_early_cpu(cpu)
                per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
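/*
 * Hedged reconstruction of per_cpu_init() from upstream: the copy above
 * runs once; every call returns the calling cpu's own per-cpu base.
 */
void *per_cpu_init(void)
{
        static int first_time = 1;
        int cpu;

        if (first_time) {
                first_time = 0;
                for_each_possible_early_cpu(cpu)
                        per_cpu(local_per_cpu_offset, cpu) =
                                __per_cpu_offset[cpu];
        }
        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}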
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 *
 * efi_memmap_walk() knows nothing about how memory is laid out across
 * nodes; figure out which node each block belongs to, and split blocks
 * that span multiple nodes.
 */
        ...
                /* No SRAT table: attribute all memory to node 0 */
                (*func)(start, end - start, 0);
        ...
                /* report the clipped piece with its owning node */
                (*func)(rs, re - rs, node_memblk[i].nid);
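/*
 * Hedged sketch of the surrounding loop (reconstructed from upstream):
 * intersect [start, end) with each SRAT memblk and report each piece to
 * the callback together with its node id.
 */
        for (i = 0; i < num_node_memblks; i++) {
                rs = max(start, node_memblk[i].start_paddr);
                re = min(end, node_memblk[i].start_paddr +
                         node_memblk[i].size);

                if (rs < re)
                        (*func)(rs, re - rs, node_memblk[i].nid);

                if (re == end)
                        break;
        }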
/**
 * paging_init - set up page tables
 *
 * Set up the page tables for each node and free the bootmem allocator's
 * memory for general use.
 */
        ...
        /* carve the virtual mem_map out of the top of vmalloc space */
        VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
                sizeof(struct page));
        vmem_map = (struct page *) VMALLOC_END;
        ...
        NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;