Lines matching refs: cpu

Each hit below shows the source line number, the matching code line, and the enclosing function; "local" marks the definition of a local variable and "argument" a function parameter. The hits correspond to the arm64 CPU-topology parsing code (arch/arm64/kernel/topology.c in kernels of this vintage, later moved to drivers/base/arch_topology.c).
37 int cpu; in get_cpu_for_node() local
43 cpu = of_cpu_node_to_id(cpu_node); in get_cpu_for_node()
44 if (cpu >= 0) in get_cpu_for_node()
45 topology_parse_cpu_capacity(cpu_node, cpu); in get_cpu_for_node()
50 return cpu; in get_cpu_for_node()
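
For context, get_cpu_for_node() resolves a device-tree "cpu" phandle to a logical CPU id and, when the lookup succeeds, parses that CPU's capacity from the node. A sketch of the surrounding function, reconstructed from the upstream source (lines not shown in the hits above are filled in and may differ slightly between kernel versions):

static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        /* the cpu-map entry points at a cpu node via a "cpu" phandle */
        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        cpu = of_cpu_node_to_id(cpu_node);
        if (cpu >= 0)
                topology_parse_cpu_capacity(cpu_node, cpu);
        else
                pr_crit("Unable to find CPU node for %pOF\n", cpu_node);

        of_node_put(cpu_node);
        return cpu;
}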
59 int cpu; in parse_core() local
67 cpu = get_cpu_for_node(t); in parse_core()
68 if (cpu >= 0) { in parse_core()
69 cpu_topology[cpu].package_id = package_id; in parse_core()
70 cpu_topology[cpu].core_id = core_id; in parse_core()
71 cpu_topology[cpu].thread_id = i; in parse_core()
83 cpu = get_cpu_for_node(core); in parse_core()
84 if (cpu >= 0) { in parse_core()
91 cpu_topology[cpu].package_id = package_id; in parse_core()
92 cpu_topology[cpu].core_id = core_id; in parse_core()
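
parse_core() walks the thread<N> children of a cpu-map core node, stamping package, core, and thread ids onto each CPU it can resolve; a core with no thread children is a leaf and gets only package and core ids. A reconstructed sketch (error strings and exact flow may vary by version):

static int __init parse_core(struct device_node *core, int package_id,
                             int core_id)
{
        char name[10];
        bool leaf = true;
        int i = 0;
        struct device_node *t;
        int cpu;

        /* first, any SMT threads described under this core */
        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].package_id = package_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else {
                                pr_err("%pOF: Can't get CPU for thread\n", t);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        /* then the core node itself, which is only valid for a leaf core */
        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%pOF: Core has both threads and CPU\n", core);
                        return -EINVAL;
                }

                cpu_topology[cpu].package_id = package_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf) {
                pr_err("%pOF: Can't get CPU for leaf core\n", core);
                return -EINVAL;
        }

        return 0;
}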
173 int cpu; in parse_dt_topology() local
199 for_each_possible_cpu(cpu) in parse_dt_topology()
200 if (cpu_topology[cpu].package_id == -1) in parse_dt_topology()
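
The parse_dt_topology() hits come from its final sanity check: after the cpu-map has been parsed, every possible CPU must have received a package id, otherwise the parse is rejected and the default flat topology is kept. Roughly (a reconstructed fragment, not the whole function):

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu)
                if (cpu_topology[cpu].package_id == -1)
                        ret = -EINVAL;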
216 const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
218 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); in cpu_coregroup_mask()
221 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { in cpu_coregroup_mask()
223 core_mask = &cpu_topology[cpu].core_sibling; in cpu_coregroup_mask()
225 if (cpu_topology[cpu].llc_id != -1) { in cpu_coregroup_mask()
226 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) in cpu_coregroup_mask()
227 core_mask = &cpu_topology[cpu].llc_sibling; in cpu_coregroup_mask()
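
cpu_coregroup_mask() returns the smallest of the NUMA-node mask, the package (core-sibling) mask, and the LLC-sibling mask; the scheduler builds its MC domain from this. Reconstructed sketch:

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

        /* find the smaller of NUMA, core or LLC siblings */
        if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
                /* not NUMA in package, use the package siblings */
                core_mask = &cpu_topology[cpu].core_sibling;
        }
        if (cpu_topology[cpu].llc_id != -1) {
                if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
                        core_mask = &cpu_topology[cpu].llc_sibling;
        }

        return core_mask;
}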
236 int cpu; in update_siblings_masks() local
239 for_each_online_cpu(cpu) { in update_siblings_masks()
240 cpu_topo = &cpu_topology[cpu]; in update_siblings_masks()
243 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
251 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
257 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
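
update_siblings_masks() compares the CPU coming online against every CPU already online and fills the sibling masks symmetrically: a shared llc_id puts both CPUs in each other's llc_sibling, a shared package_id in core_sibling, and a shared core_id within the same package in thread_sibling. A reconstructed sketch (cpuid is the CPU being brought up):

static void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_online_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->llc_id == cpu_topo->llc_id) {
                        cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
                        cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                }

                if (cpuid_topo->package_id != cpu_topo->package_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}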
299 static void clear_cpu_topology(int cpu) in clear_cpu_topology() argument
301 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in clear_cpu_topology()
304 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
307 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
309 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
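
clear_cpu_topology() resets each sibling mask so it contains only the CPU itself, which is why the hits show cpumask_set_cpu() calls inside a "clear" function: a CPU is always its own LLC, core, and thread sibling. Sketch:

static void clear_cpu_topology(int cpu)
{
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];

        cpumask_clear(&cpu_topo->llc_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

        cpumask_clear(&cpu_topo->core_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
        cpumask_clear(&cpu_topo->thread_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}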
314 unsigned int cpu; in reset_cpu_topology() local
316 for_each_possible_cpu(cpu) { in reset_cpu_topology()
317 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in reset_cpu_topology()
324 clear_cpu_topology(cpu); in reset_cpu_topology()
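
reset_cpu_topology() walks every possible CPU, invalidates all the ids (set to -1), and then lets clear_cpu_topology() reinitialize the sibling masks. Reconstructed sketch:

static void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->package_id = -1;
                cpu_topo->llc_id = -1;

                clear_cpu_topology(cpu);
        }
}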
328 void remove_cpu_topology(unsigned int cpu) in remove_cpu_topology() argument
332 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
333 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
334 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
335 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
336 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
337 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
339 clear_cpu_topology(cpu); in remove_cpu_topology()
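
remove_cpu_topology() is the hot-unplug counterpart: the departing CPU is removed from every sibling's core, thread, and LLC mask, and then its own masks are reset. Sketch:

void remove_cpu_topology(unsigned int cpu)
{
        int sibling;

        for_each_cpu(sibling, topology_core_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
        for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
        for_each_cpu(sibling, topology_llc_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

        clear_cpu_topology(cpu);
}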
350 int cpu, topology_id; in parse_acpi_topology() local
354 for_each_possible_cpu(cpu) { in parse_acpi_topology()
357 topology_id = find_acpi_cpu_topology(cpu, 0); in parse_acpi_topology()
362 cpu_topology[cpu].thread_id = topology_id; in parse_acpi_topology()
363 topology_id = find_acpi_cpu_topology(cpu, 1); in parse_acpi_topology()
364 cpu_topology[cpu].core_id = topology_id; in parse_acpi_topology()
366 cpu_topology[cpu].thread_id = -1; in parse_acpi_topology()
367 cpu_topology[cpu].core_id = topology_id; in parse_acpi_topology()
369 topology_id = find_acpi_cpu_topology_package(cpu); in parse_acpi_topology()
370 cpu_topology[cpu].package_id = topology_id; in parse_acpi_topology()
372 i = acpi_find_last_cache_level(cpu); in parse_acpi_topology()
379 cache_id = find_acpi_cpu_cache_topology(cpu, i); in parse_acpi_topology()
381 cpu_topology[cpu].llc_id = cache_id; in parse_acpi_topology()
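
parse_acpi_topology() derives the same ids from the ACPI PPTT table: level 0 is a thread id on multithreaded parts (detected on arm64 via the MPIDR MT bit) and a core id otherwise, the package id comes from find_acpi_cpu_topology_package(), and llc_id is taken from the deepest cache level the PPTT describes. A reconstructed sketch; the ACPI cache helpers in particular were reworked in later kernels:

static int __init parse_acpi_topology(void)
{
        bool is_threaded;
        int cpu, topology_id;

        is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

        for_each_possible_cpu(cpu) {
                int i, cache_id;

                topology_id = find_acpi_cpu_topology(cpu, 0);
                if (topology_id < 0)
                        return topology_id;

                if (is_threaded) {
                        cpu_topology[cpu].thread_id = topology_id;
                        topology_id = find_acpi_cpu_topology(cpu, 1);
                        cpu_topology[cpu].core_id = topology_id;
                } else {
                        cpu_topology[cpu].thread_id = -1;
                        cpu_topology[cpu].core_id = topology_id;
                }

                topology_id = find_acpi_cpu_topology_package(cpu);
                cpu_topology[cpu].package_id = topology_id;

                i = acpi_find_last_cache_level(cpu);
                if (i > 0) {
                        /*
                         * this is the only part of cpu_topology that has
                         * a direct relationship with the cache topology
                         */
                        cache_id = find_acpi_cpu_cache_topology(cpu, i);
                        if (cache_id > 0)
                                cpu_topology[cpu].llc_id = cache_id;
                }
        }

        return 0;
}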