Lines Matching full:cpu

3  * Arch specific cpu topology information
11 #include <linux/cpu.h>
63 int cpu; in topology_set_scale_freq_source() local
74 for_each_cpu(cpu, cpus) { in topology_set_scale_freq_source()
75 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_set_scale_freq_source()
79 rcu_assign_pointer(per_cpu(sft_data, cpu), data); in topology_set_scale_freq_source()
80 cpumask_set_cpu(cpu, &scale_freq_counters_mask); in topology_set_scale_freq_source()
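
The matched lines above are the publish half of a per-CPU RCU pointer pattern. Below is a minimal sketch of that pattern, assuming a simplified scale_freq_data that carries only a source id; names mirror the listing, but the body is illustrative and omits the kernel's checks for conflicting sources:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/rcupdate.h>

    struct scale_freq_data {
        int source;                      /* assumed minimal layout */
    };

    static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
    static cpumask_t scale_freq_counters_mask;

    static void set_scale_freq_source(struct scale_freq_data *data,
                                      const struct cpumask *cpus)
    {
        int cpu;

        for_each_cpu(cpu, cpus) {
            /* Publish the provider; readers pair with rcu_dereference(). */
            rcu_assign_pointer(per_cpu(sft_data, cpu), data);
            cpumask_set_cpu(cpu, &scale_freq_counters_mask);
        }
    }

The clear path in the next group does the inverse (assigning NULL) and, in the kernel, is followed by synchronize_rcu() before the provider may be freed.
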
94 int cpu; in topology_clear_scale_freq_source() local
98 for_each_cpu(cpu, cpus) { in topology_clear_scale_freq_source()
99 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_clear_scale_freq_source()
102 rcu_assign_pointer(per_cpu(sft_data, cpu), NULL); in topology_clear_scale_freq_source()
103 cpumask_clear_cpu(cpu, &scale_freq_counters_mask); in topology_clear_scale_freq_source()
156 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) in topology_set_cpu_scale() argument
158 per_cpu(cpu_scale, cpu) = capacity; in topology_set_cpu_scale()
182 int cpu; in topology_update_thermal_pressure() local
184 cpu = cpumask_first(cpus); in topology_update_thermal_pressure()
185 max_capacity = arch_scale_cpu_capacity(cpu); in topology_update_thermal_pressure()
186 max_freq = per_cpu(freq_factor, cpu); in topology_update_thermal_pressure()
202 trace_thermal_pressure_update(cpu, th_pressure); in topology_update_thermal_pressure()
204 for_each_cpu(cpu, cpus) in topology_update_thermal_pressure()
205 WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); in topology_update_thermal_pressure()
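
topology_update_thermal_pressure() converts a thermally capped frequency into lost capacity. A hedged sketch of that arithmetic, with capped_freq taken as a parameter in the same kHz-derived unit as max_freq:

    #include <linux/math.h>

    static unsigned long thermal_pressure_of(unsigned long capped_freq,
                                             unsigned long max_freq,
                                             unsigned long max_capacity)
    {
        /* Capacity still available under the cap, scaled linearly. */
        unsigned long capacity = mult_frac(capped_freq, max_capacity, max_freq);

        /* Pressure is what the cap takes away from max capacity. */
        return max_capacity - capacity;
    }

Each CPU in the mask then receives the result via WRITE_ONCE(), as the last two matched lines show.
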
213 struct cpu *cpu = container_of(dev, struct cpu, dev); in cpu_capacity_show() local
215 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); in cpu_capacity_show()
226 struct device *cpu; in register_cpu_capacity_sysctl() local
229 cpu = get_cpu_device(i); in register_cpu_capacity_sysctl()
230 if (!cpu) { in register_cpu_capacity_sysctl()
231 pr_err("%s: too early to get CPU%d device!\n", in register_cpu_capacity_sysctl()
235 device_create_file(cpu, &dev_attr_cpu_capacity); in register_cpu_capacity_sysctl()
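
The sysfs hookup above follows the standard device-attribute recipe; a sketch, assuming the cpu_capacity_show() callback from lines 213-215 of the listing is in scope:

    #include <linux/cpu.h>
    #include <linux/device.h>
    #include <linux/init.h>

    static DEVICE_ATTR_RO(cpu_capacity);    /* binds cpu_capacity_show() */

    static int __init register_cpu_capacity_sysctl_sketch(void)
    {
        int i;

        for_each_possible_cpu(i) {
            struct device *cpu = get_cpu_device(i);

            if (!cpu)
                continue;    /* the kernel logs "too early" and skips */
            device_create_file(cpu, &dev_attr_cpu_capacity);
        }
        return 0;
    }
    subsys_initcall(register_cpu_capacity_sysctl_sketch);
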
275 int cpu; in topology_normalize_cpu_scale() local
281 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
282 capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); in topology_normalize_cpu_scale()
287 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
288 capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); in topology_normalize_cpu_scale()
291 topology_set_cpu_scale(cpu, capacity); in topology_normalize_cpu_scale()
292 pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", in topology_normalize_cpu_scale()
293 cpu, topology_get_cpu_scale(cpu)); in topology_normalize_cpu_scale()
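
topology_normalize_cpu_scale() is a two-pass maximum-then-scale loop: pass one finds the largest raw_capacity[cpu] * freq_factor product, pass two rescales every CPU so that maximum lands exactly on SCHED_CAPACITY_SCALE (1024). A sketch, assuming raw_capacity[] and the per-CPU freq_factor are populated as in the surrounding code:

    #include <linux/math64.h>
    #include <linux/sched/topology.h>

    static void normalize_cpu_scale_sketch(void)
    {
        u64 capacity, capacity_scale = 1;
        int cpu;

        for_each_possible_cpu(cpu) {
            capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
            capacity_scale = max(capacity, capacity_scale);
        }

        for_each_possible_cpu(cpu) {
            capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
            /* The biggest CPU ends up at SCHED_CAPACITY_SCALE. */
            capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
                                 capacity_scale);
            topology_set_cpu_scale(cpu, capacity);
        }
    }
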
297 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) in topology_parse_cpu_capacity() argument
319 raw_capacity[cpu] = cpu_capacity; in topology_parse_cpu_capacity()
321 cpu_node, raw_capacity[cpu]); in topology_parse_cpu_capacity()
324 * Update freq_factor for calculating early boot cpu capacities. in topology_parse_cpu_capacity()
325  * For non-clk CPU DVFS mechanism, there's no way to get the frequency value now, assuming they are running at the same frequency (max). in topology_parse_cpu_capacity()
331 per_cpu(freq_factor, cpu) = in topology_parse_cpu_capacity()
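
For context, topology_parse_cpu_capacity() reads the optional capacity-dmips-mhz DT property into raw_capacity[], and when the CPU node has a clock it derives the early freq_factor from the clock rate. A condensed sketch of those two steps (error handling trimmed):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static bool parse_capacity_sketch(struct device_node *cpu_node, int cpu)
    {
        u32 cpu_capacity;
        struct clk *cpu_clk;

        if (of_property_read_u32(cpu_node, "capacity-dmips-mhz",
                                 &cpu_capacity))
            return false;

        raw_capacity[cpu] = cpu_capacity;

        /* Use the CPU clock rate (kHz) as the early freq_factor. */
        cpu_clk = of_clk_get(cpu_node, 0);
        if (!PTR_ERR_OR_ZERO(cpu_clk)) {
            per_cpu(freq_factor, cpu) = clk_get_rate(cpu_clk) / 1000;
            clk_put(cpu_clk);
        }
        return true;
    }
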
354 int cpu; in topology_init_cpu_capacity_cppc() local
364 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
365 if (!cppc_get_perf_caps(cpu, &perf_caps) && in topology_init_cpu_capacity_cppc()
368 raw_capacity[cpu] = perf_caps.highest_perf; in topology_init_cpu_capacity_cppc()
369 pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n", in topology_init_cpu_capacity_cppc()
370 cpu, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
374 pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu); in topology_init_cpu_capacity_cppc()
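
On ACPI systems the raw capacities instead come from CPPC; a minimal sketch of the lookup, assuming only that highest_perf is validated the way the pr_err above implies (a missing or invalid value aborts the whole pass):

    #include <acpi/cppc_acpi.h>
    #include <linux/errno.h>

    static int raw_capacity_from_cppc(int cpu, u32 *out)
    {
        struct cppc_perf_caps perf_caps;

        if (cppc_get_perf_caps(cpu, &perf_caps))
            return -ENODEV;

        /* The kernel additionally range-checks highest_perf here. */
        *out = perf_caps.highest_perf;
        return 0;
    }
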
399 int cpu; in init_cpu_capacity_callback() local
407 pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", in init_cpu_capacity_callback()
413 for_each_cpu(cpu, policy->related_cpus) in init_cpu_capacity_callback()
414 per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000; in init_cpu_capacity_callback()
437  * On ACPI based systems skip registering cpufreq notifier as cpufreq information is not needed for cpu capacity initialization. in register_cpufreq_notifier()
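
init_cpu_capacity_callback() runs from a cpufreq policy notifier; each newly created policy contributes its related CPUs' max frequency (in MHz) as freq_factor. A sketch of that wiring with simplified naming; the real callback also tracks which CPUs remain to be visited (the to_visit mask in the pr_debug above) and triggers normalization once none do:

    #include <linux/cpufreq.h>

    static int capacity_notifier_cb(struct notifier_block *nb,
                                    unsigned long val, void *data)
    {
        struct cpufreq_policy *policy = data;
        int cpu;

        if (val != CPUFREQ_CREATE_POLICY)
            return 0;

        for_each_cpu(cpu, policy->related_cpus)
            per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

        return 0;
    }

    static struct notifier_block capacity_nb = {
        .notifier_call = capacity_notifier_cb,
    };

    /* cpufreq_register_notifier(&capacity_nb, CPUFREQ_POLICY_NOTIFIER); */
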
470  * This function returns the logical cpu number of the node.
472  * (1) logical cpu number which is >= 0.
474  * there is no possible logical CPU in the kernel to match. This happens
476  * when there are fewer possible CPUs in the kernel than CPU nodes in DT. We need to just ignore this case.
482 int cpu; in get_cpu_for_node() local
484 cpu_node = of_parse_phandle(node, "cpu", 0); in get_cpu_for_node()
488 cpu = of_cpu_node_to_id(cpu_node); in get_cpu_for_node()
489 if (cpu >= 0) in get_cpu_for_node()
490 topology_parse_cpu_capacity(cpu_node, cpu); in get_cpu_for_node()
492 pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n", in get_cpu_for_node()
496 return cpu; in get_cpu_for_node()
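
Pieced together, get_cpu_for_node() is a short phandle walk; this sketch reconstructs only what the match listing elides, chiefly the reference-count handling:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>
    #include <linux/of.h>
    #include <linux/printk.h>

    static int __init get_cpu_for_node(struct device_node *node)
    {
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
            return -1;

        cpu = of_cpu_node_to_id(cpu_node);  /* -ENODEV if not possible */
        if (cpu >= 0)
            topology_parse_cpu_capacity(cpu_node, cpu);
        else
            pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
                    cpu_node, cpumask_pr_args(cpu_possible_mask));

        of_node_put(cpu_node);              /* balance of_parse_phandle() */
        return cpu;
    }
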
505 int cpu; in parse_core() local
513 cpu = get_cpu_for_node(t); in parse_core()
514 if (cpu >= 0) { in parse_core()
515 cpu_topology[cpu].package_id = package_id; in parse_core()
516 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
517 cpu_topology[cpu].core_id = core_id; in parse_core()
518 cpu_topology[cpu].thread_id = i; in parse_core()
519 } else if (cpu != -ENODEV) { in parse_core()
520 pr_err("%pOF: Can't get CPU for thread\n", t); in parse_core()
529 cpu = get_cpu_for_node(core); in parse_core()
530 if (cpu >= 0) { in parse_core()
532 pr_err("%pOF: Core has both threads and CPU\n", in parse_core()
537 cpu_topology[cpu].package_id = package_id; in parse_core()
538 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
539 cpu_topology[cpu].core_id = core_id; in parse_core()
540 } else if (leaf && cpu != -ENODEV) { in parse_core()
541 pr_err("%pOF: Can't get CPU for leaf core\n", core); in parse_core()
588 pr_err("%pOF: cpu-map children should be clusters\n", in parse_cluster()
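
parse_core() and parse_cluster() walk the DT cpu-map container node by node. A hypothetical layout (not taken from any real dts) showing the node shapes they expect, with thread* children only on SMT cores:

    cpu-map {
        cluster0 {
            core0 { cpu = <&cpu0>; };
            core1 { cpu = <&cpu1>; };
        };
        cluster1 {
            core0 {
                thread0 { cpu = <&cpu2>; };
                thread1 { cpu = <&cpu3>; };
            };
        };
    };

A core that carries both a cpu phandle and thread subnodes is rejected, which is what the "Core has both threads and CPU" error above guards against.
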
646 int cpu; in parse_dt_topology() local
650 pr_err("No CPU information found in DT\n"); in parse_dt_topology()
655  * When topology is provided cpu-map is essentially a root cluster with restricted subnodes. in parse_dt_topology()
658 map = of_get_child_by_name(cn, "cpu-map"); in parse_dt_topology()
672 for_each_possible_cpu(cpu) in parse_dt_topology()
673 if (cpu_topology[cpu].package_id < 0) { in parse_dt_topology()
687 * cpu topology table
692 const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
694 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); in cpu_coregroup_mask()
697 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { in cpu_coregroup_mask()
699 core_mask = &cpu_topology[cpu].core_sibling; in cpu_coregroup_mask()
702 if (last_level_cache_is_valid(cpu)) { in cpu_coregroup_mask()
703 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) in cpu_coregroup_mask()
704 core_mask = &cpu_topology[cpu].llc_sibling; in cpu_coregroup_mask()
708  * For systems with no shared cpu-side LLC but with clusters defined, extend core_mask to cluster_siblings. The sched domain builder will then remove MC as redundant with CLS if SCHED_CLUSTER is enabled. in cpu_coregroup_mask()
713 cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling)) in cpu_coregroup_mask()
714 core_mask = &cpu_topology[cpu].cluster_sibling; in cpu_coregroup_mask()
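
The selection logic above is a chain of "shrink only if fully contained" steps, so the resulting MC-level mask can never cross a NUMA node. The rule distilled into a hypothetical helper (not in the source):

    #include <linux/cpumask.h>

    static const struct cpumask *narrow_if_subset(const struct cpumask *cur,
                                                  const struct cpumask *cand)
    {
        /* Adopt the candidate only when it lies entirely inside cur. */
        return cpumask_subset(cand, cur) ? cand : cur;
    }
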
719 const struct cpumask *cpu_clustergroup_mask(int cpu) in cpu_clustergroup_mask() argument
725 if (cpumask_subset(cpu_coregroup_mask(cpu), in cpu_clustergroup_mask()
726 &cpu_topology[cpu].cluster_sibling)) in cpu_clustergroup_mask()
727 return topology_sibling_cpumask(cpu); in cpu_clustergroup_mask()
729 return &cpu_topology[cpu].cluster_sibling; in cpu_clustergroup_mask()
735 int cpu, ret; in update_siblings_masks() local
742 for_each_online_cpu(cpu) { in update_siblings_masks()
743 cpu_topo = &cpu_topology[cpu]; in update_siblings_masks()
745 if (last_level_cache_is_shared(cpu, cpuid)) { in update_siblings_masks()
746 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
754 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
760 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); in update_siblings_masks()
768 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
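
update_siblings_masks() always records membership in both directions so the masks stay symmetric; each matched cpumask_set_cpu() above has a mirror call on the other CPU's mask. A sketch of one such pair as a hypothetical helper, assuming cpu_topo/cpuid_topo as in the listing:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    static void link_core_siblings(struct cpu_topology *a, int cpu_a,
                                   struct cpu_topology *b, int cpu_b)
    {
        /* Record the relation on both masks so they stay symmetric. */
        cpumask_set_cpu(cpu_b, &a->core_sibling);
        cpumask_set_cpu(cpu_a, &b->core_sibling);
    }
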
772 static void clear_cpu_topology(int cpu) in clear_cpu_topology() argument
774 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in clear_cpu_topology()
777 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
780 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling); in clear_cpu_topology()
783 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
785 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
790 unsigned int cpu; in reset_cpu_topology() local
792 for_each_possible_cpu(cpu) { in reset_cpu_topology()
793 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in reset_cpu_topology()
800 clear_cpu_topology(cpu); in reset_cpu_topology()
804 void remove_cpu_topology(unsigned int cpu) in remove_cpu_topology() argument
808 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
809 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
810 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
811 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
812 for_each_cpu(sibling, topology_cluster_cpumask(cpu)) in remove_cpu_topology()
813 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); in remove_cpu_topology()
814 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
815 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
817 clear_cpu_topology(cpu); in remove_cpu_topology()
828 int cpu, ret; in init_cpu_topology() local
844 for_each_possible_cpu(cpu) { in init_cpu_topology()
845 ret = fetch_cache_info(cpu); in init_cpu_topology()
865 pr_debug("CPU%u: package %d core %d thread %d\n", in store_cpu_topology()