Lines matching the identifier "cpu" in drivers/base/arch_topology.c, grouped by function. Non-matching lines between hits are elided as "...":

Top of file:

    /* Arch specific cpu topology information */
    #include <linux/cpu.h>

topology_set_scale_freq_source():

    int cpu;
    ...
    for_each_cpu(cpu, cpus) {
        sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
        ...
        rcu_assign_pointer(per_cpu(sft_data, cpu), data);
        cpumask_set_cpu(cpu, &scale_freq_counters_mask);

topology_clear_scale_freq_source():

    int cpu;
    ...
    for_each_cpu(cpu, cpus) {
        sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
        ...
        rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
        cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
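
These two functions publish and retire a per-CPU frequency-scale source under RCU. A minimal sketch of how a counters driver might use the pair, assuming the scale_freq_data layout exported by <linux/arch_topology.h> at the time of this listing; my_set_freq_scale() and the init/exit wrappers are hypothetical names:

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    /* Called from the scheduler tick on each CPU we registered for. */
    static void my_set_freq_scale(void)
    {
        /* derive this CPU's freq_scale from hardware counters */
    }

    static struct scale_freq_data my_sfd = {
        .source         = SCALE_FREQ_SOURCE_ARCH,
        .set_freq_scale = my_set_freq_scale,
    };

    static void my_counters_init(const struct cpumask *cpus)
    {
        /* sft_data is published with rcu_assign_pointer(), so tick-time
         * readers never observe a half-installed source */
        topology_set_scale_freq_source(&my_sfd, cpus);
    }

    static void my_counters_exit(const struct cpumask *cpus)
    {
        topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus);
    }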

topology_set_cpu_scale():

    void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
    {
        per_cpu(cpu_scale, cpu) = capacity;
    }

topology_set_thermal_pressure():

    int cpu;
    ...
    for_each_cpu(cpu, cpus)
        WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
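
WRITE_ONCE() pairs with lockless readers on the scheduler side, which subtract the stored pressure from CPU capacity. A hedged sketch of how a thermal driver could translate a frequency cap into pressure, mirroring the arithmetic the cpufreq cooling device uses; report_thermal_cap() and its parameters are illustrative, not an API of this file:

    #include <linux/arch_topology.h>
    #include <linux/math64.h>
    #include <linux/sched/topology.h>

    static void report_thermal_cap(const struct cpumask *cpus, int cpu,
                                   unsigned long capped_freq,
                                   unsigned long max_freq)
    {
        unsigned long max_cap = arch_scale_cpu_capacity(cpu);
        unsigned long capped_cap = div64_u64((u64)max_cap * capped_freq,
                                             max_freq);

        /* pressure = capacity lost to the thermal cap */
        topology_set_thermal_pressure(cpus, max_cap - capped_cap);
    }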

cpu_capacity_show():

    struct cpu *cpu = container_of(dev, struct cpu, dev);
    ...
    return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));

register_cpu_capacity_sysctl():

    struct device *cpu;
    ...
    cpu = get_cpu_device(i);
    if (!cpu) {
        pr_err("%s: too early to get CPU%d device!\n",
               __func__, i);
        ...
    device_create_file(cpu, &dev_attr_cpu_capacity);
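
Together with cpu_capacity_show() above, this exposes each CPU's normalized capacity in sysfs. A small userspace sketch that reads it back, assuming the standard sysfs CPU layout and a kernel that provides the cpu_capacity attribute:

    #include <stdio.h>

    int main(void)
    {
        char path[64];
        unsigned long cap;
        FILE *f;
        int cpu;

        for (cpu = 0; cpu < 8; cpu++) {
            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/cpu_capacity", cpu);
            f = fopen(path, "r");
            if (!f)
                break;  /* no such CPU, or attribute missing */
            if (fscanf(f, "%lu", &cap) == 1)
                printf("cpu%d capacity=%lu\n", cpu, cap);
            fclose(f);
        }
        return 0;
    }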

topology_normalize_cpu_scale():

    int cpu;
    ...
    for_each_possible_cpu(cpu) {
        capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
    ...
    for_each_possible_cpu(cpu) {
        capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
        ...
        topology_set_cpu_scale(cpu, capacity);
        pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                 cpu, topology_get_cpu_scale(cpu));
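
The two loops are a two-pass normalization: pass one finds the largest raw_capacity * freq_factor product, pass two rescales every CPU so the fastest one lands exactly on SCHED_CAPACITY_SCALE (1024). A standalone sketch with made-up numbers, using plain 64-bit division where the kernel uses div64_u64():

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
        /* illustrative: capacity-dmips-mhz from DT, freq_factor in MHz */
        unsigned long long raw[2] = { 1024, 512 };   /* big, LITTLE */
        unsigned long long mhz[2] = { 2000, 1400 };
        unsigned long long cap, scale = 0;
        int cpu;

        for (cpu = 0; cpu < 2; cpu++) {              /* pass 1: find max */
            cap = raw[cpu] * mhz[cpu];
            if (cap > scale)
                scale = cap;
        }
        for (cpu = 0; cpu < 2; cpu++) {              /* pass 2: normalize */
            cap = (raw[cpu] * mhz[cpu] << SCHED_CAPACITY_SHIFT) / scale;
            printf("cpu%d capacity=%llu\n", cpu, cap);  /* 1024, then 358 */
        }
        return 0;
    }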

topology_parse_cpu_capacity():

    bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
    ...
        raw_capacity[cpu] = cpu_capacity;
        pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
                 cpu_node, raw_capacity[cpu]);
        ...
        /*
         * Update freq_factor for calculating early boot cpu capacities.
         * For non-clk CPU DVFS mechanism, there's no way to get the
         * ...
         */
        ...
        per_cpu(freq_factor, cpu) =
            clk_get_rate(cpu_clk) / 1000;
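
The raw value comes from the devicetree. A hedged sketch of the DT side and of the property read this parser is built around; parse_capacity() is a hypothetical wrapper, but "capacity-dmips-mhz" is the documented binding:

    #include <linux/of.h>

    /*
     * DT example (illustrative):
     *     cpu@0   { ... capacity-dmips-mhz = <512>;  };
     *     cpu@100 { ... capacity-dmips-mhz = <1024>; };
     */
    static bool __init parse_capacity(struct device_node *cpu_node, u32 *out)
    {
        /* of_property_read_u32() returns 0 on success */
        return !of_property_read_u32(cpu_node, "capacity-dmips-mhz", out);
    }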

init_cpu_capacity_callback():

    int cpu;
    ...
    pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
             ...);
    ...
    for_each_cpu(cpu, policy->related_cpus)
        per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
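
This callback runs from a cpufreq policy notifier: policy->cpuinfo.max_freq is in kHz, so dividing by 1000 stores freq_factor in MHz, the same unit the DT clk path uses. A hedged sketch of the hookup, following the register_cpufreq_notifier() pattern in this file; my_register() is a hypothetical name:

    #include <linux/cpufreq.h>

    static int init_cpu_capacity_callback(struct notifier_block *nb,
                                          unsigned long val, void *data);

    static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
    };

    static int __init my_register(void)
    {
        return cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                         CPUFREQ_POLICY_NOTIFIER);
    }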

register_cpufreq_notifier():

    /*
     * on ACPI-based systems we need to use the default cpu capacity
     * until we have the necessary code to parse the cpu capacity, so
     * skip registering cpufreq notifier.
     */

get_cpu_for_node():

    /*
     * This function returns the logical CPU number of the node.
     * There are basically three kinds of return values:
     * (1) logical CPU number, which is >= 0.
     * (2) -ENODEV when the device tree (DT) node is valid but
     *     there is no possible logical CPU in the kernel to match. This happens
     *     when CONFIG_NR_CPUS is configured to be smaller than the number of
     *     CPU nodes in DT. We need to just ignore this case.
     * (3) -1 if the node does not exist in the DT.
     */

    int cpu;
    ...
    cpu_node = of_parse_phandle(node, "cpu", 0);
    ...
    cpu = of_cpu_node_to_id(cpu_node);
    if (cpu >= 0)
        topology_parse_cpu_capacity(cpu_node, cpu);
    else
        pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
                ...);
    ...
    return cpu;
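
A hedged sketch of the two lookups get_cpu_for_node() is built around; node_to_cpu() is a hypothetical, simplified stand-in (the real function also triggers capacity parsing and distinguishes the -ENODEV case):

    #include <linux/of.h>

    static int node_to_cpu(struct device_node *leaf)
    {
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(leaf, "cpu", 0);  /* "cpu" phandle */
        if (!cpu_node)
            return -1;

        cpu = of_cpu_node_to_id(cpu_node);  /* logical id, or -ENODEV */
        of_node_put(cpu_node);
        return cpu;
    }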

parse_core():

    int cpu;
    ...
    cpu = get_cpu_for_node(t);
    if (cpu >= 0) {
        cpu_topology[cpu].package_id = package_id;
        cpu_topology[cpu].core_id = core_id;
        cpu_topology[cpu].thread_id = i;
    } else if (cpu != -ENODEV) {
        pr_err("%pOF: Can't get CPU for thread\n", t);
    ...
    cpu = get_cpu_for_node(core);
    if (cpu >= 0) {
        if (!leaf) {
            pr_err("%pOF: Core has both threads and CPU\n",
                   core);
        ...
        cpu_topology[cpu].package_id = package_id;
        cpu_topology[cpu].core_id = core_id;
    } else if (leaf && cpu != -ENODEV) {
        pr_err("%pOF: Can't get CPU for leaf core\n", core);

parse_cluster():

    pr_err("%pOF: cpu-map children should be clusters\n",
           ...);
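
parse_cluster() and parse_core() walk the cpu-map by constructing child node names until one is missing. A hedged sketch of that walk; walk_clusters() is a hypothetical, simplified stand-in (no depth tracking or error handling), and the cluster%d/core%d/thread%d names follow the documented cpu-map binding:

    #include <linux/kernel.h>
    #include <linux/of.h>

    /*
     * cpu-map layout being walked (illustrative):
     *     cpu-map {
     *         cluster0 {
     *             core0 { cpu = <&cpu0>; };
     *             core1 { cpu = <&cpu1>; };
     *         };
     *         cluster1 { core0 { cpu = <&cpu2>; }; };
     *     };
     */
    static void __init walk_clusters(struct device_node *map)
    {
        struct device_node *c;
        char name[20];
        int i = 0;

        do {
            snprintf(name, sizeof(name), "cluster%d", i);
            c = of_get_child_by_name(map, name);
            if (c) {
                /* recurse into cores/threads here */
                of_node_put(c);
            }
            i++;
        } while (c);
    }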

parse_dt_topology():

    int cpu;
    ...
    pr_err("No CPU information found in DT\n");
    ...
    /*
     * When topology is provided cpu-map is essentially a root
     * cluster with restricted subnodes.
     */
    map = of_get_child_by_name(cn, "cpu-map");
    ...
    for_each_possible_cpu(cpu)
        if (cpu_topology[cpu].package_id == -1)
            ret = -EINVAL;

File-scope data:

    /*
     * cpu topology table
     */
    struct cpu_topology cpu_topology[NR_CPUS];

cpu_coregroup_mask():

    const struct cpumask *cpu_coregroup_mask(int cpu)
    {
        const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

        ...
        if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
            ...
            core_mask = &cpu_topology[cpu].core_sibling;
        }
        if (cpu_topology[cpu].llc_id != -1) {
            if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
                core_mask = &cpu_topology[cpu].llc_sibling;
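
cpu_coregroup_mask() narrows from the NUMA-node mask to package siblings and then to LLC siblings, whenever the smaller mask is a subset of the current one. A standalone userspace sketch of that narrowing with masks as plain bitmasks; the values are illustrative:

    #include <stdio.h>

    static int subset(unsigned int a, unsigned int b)
    {
        return (a & ~b) == 0;   /* every CPU in a is also in b */
    }

    int main(void)
    {
        unsigned int node_mask    = 0xff;   /* CPUs 0-7 on this NUMA node */
        unsigned int core_sibling = 0xff;   /* same package */
        unsigned int llc_sibling  = 0x0f;   /* CPUs 0-3 share the LLC */
        unsigned int mask = node_mask;

        if (subset(core_sibling, mask))
            mask = core_sibling;            /* no NUMA inside the package */
        if (subset(llc_sibling, mask))
            mask = llc_sibling;

        printf("coregroup mask = 0x%x\n", mask);    /* 0xf */
        return 0;
    }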

update_siblings_masks():

    int cpu;
    ...
    for_each_online_cpu(cpu) {
        cpu_topo = &cpu_topology[cpu];
        ...
        cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
        ...
        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
        ...
        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);

clear_cpu_topology():

    static void clear_cpu_topology(int cpu)
    {
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];

        /* reset each sibling mask to contain just this CPU */
        cpumask_clear(&cpu_topo->llc_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
        cpumask_clear(&cpu_topo->core_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
        cpumask_clear(&cpu_topo->thread_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
    }

reset_cpu_topology():

    unsigned int cpu;
    ...
    for_each_possible_cpu(cpu) {
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];
        ...
        clear_cpu_topology(cpu);
    }

remove_cpu_topology():

    void remove_cpu_topology(unsigned int cpu)
    {
        int sibling;

        for_each_cpu(sibling, topology_core_cpumask(cpu))
            cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
        for_each_cpu(sibling, topology_sibling_cpumask(cpu))
            cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
        for_each_cpu(sibling, topology_llc_cpumask(cpu))
            cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

        clear_cpu_topology(cpu);
    }
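
Sibling masks are symmetric, so a departing CPU has to be cleared out of every sibling's mask, not merely have its own masks reset; that is why remove_cpu_topology() walks each mask in both directions. A standalone toy model of the same bookkeeping, with masks as plain bitmasks:

    #include <stdio.h>

    #define NCPU 4
    static unsigned int core_sibling[NCPU];

    int main(void)
    {
        int cpu, sibling;

        for (cpu = 0; cpu < NCPU; cpu++)     /* all four CPUs in one package */
            core_sibling[cpu] = 0xf;

        cpu = 2;                             /* take CPU2 offline */
        for (sibling = 0; sibling < NCPU; sibling++)
            if (core_sibling[cpu] & (1u << sibling))
                core_sibling[sibling] &= ~(1u << cpu);
        core_sibling[cpu] = 1u << cpu;       /* clear_cpu_topology() */

        for (cpu = 0; cpu < NCPU; cpu++)     /* prints 0xb 0xb 0x4 0xb */
            printf("cpu%d core_sibling=0x%x\n", cpu, core_sibling[cpu]);
        return 0;
    }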