Lines matching "cpu" in the arch-specific CPU topology code (drivers/base/arch_topology.c), grouped by function:
From the file header and includes:

     * Arch specific cpu topology information

    #include <linux/cpu.h>
In topology_set_cpu_scale():

    void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
    {
            per_cpu(cpu_scale, cpu) = capacity;
    }
In cpu_capacity_show():

    struct cpu *cpu = container_of(dev, struct cpu, dev);

    return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
In register_cpu_capacity_sysctl():

    struct device *cpu;
    ...
    cpu = get_cpu_device(i);
    if (!cpu) {
            pr_err("%s: too early to get CPU%d device!\n",
                   __func__, i);
            continue;
    }
    device_create_file(cpu, &dev_attr_cpu_capacity);
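The dev_attr_cpu_capacity attribute registered above surfaces each CPU's capacity to userspace. Below is a minimal userspace sketch that reads it back, assuming the conventional sysfs path /sys/devices/system/cpu/cpu<N>/cpu_capacity that a per-CPU device attribute of this name creates (the loop bound of four CPUs is arbitrary):

    #include <stdio.h>

    int main(void)
    {
            char path[64];
            unsigned long capacity;

            for (int cpu = 0; cpu < 4; cpu++) {     /* arbitrary bound */
                    snprintf(path, sizeof(path),
                             "/sys/devices/system/cpu/cpu%d/cpu_capacity", cpu);
                    FILE *f = fopen(path, "r");
                    if (!f)
                            break;                  /* CPU not present */
                    if (fscanf(f, "%lu", &capacity) == 1)
                            printf("CPU%d capacity=%lu\n", cpu, capacity);
                    fclose(f);
            }
            return 0;
    }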
In topology_normalize_cpu_scale():

    int cpu;
    ...
    for_each_possible_cpu(cpu) {
            pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
                     cpu, raw_capacity[cpu]);
            capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
                       / capacity_scale;
            topology_set_cpu_scale(cpu, capacity);
            pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                     cpu, topology_get_cpu_scale(cpu));
    }
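The loop rescales every raw capacity so the largest CPU lands at the scheduler's reference value. A standalone sketch of the same fixed-point arithmetic, with hypothetical raw values for a big.LITTLE pair (SCHED_CAPACITY_SHIFT is 10 in the kernel, so the biggest CPU normalizes to 1024):

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10         /* kernel's 1024-based scale */

    int main(void)
    {
            unsigned int raw_capacity[2] = { 446, 1024 }; /* hypothetical */
            unsigned int capacity_scale = 1024;  /* max raw value seen */

            for (int cpu = 0; cpu < 2; cpu++) {
                    unsigned long capacity =
                            ((unsigned long)raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
                            / capacity_scale;
                    /* prints 446 for the LITTLE core, 1024 for the big core */
                    printf("CPU%d cpu_capacity=%lu\n", cpu, capacity);
            }
            return 0;
    }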
In topology_parse_cpu_capacity():

    bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
    ...
    raw_capacity[cpu] = cpu_capacity;
    pr_debug(...,
             cpu_node, raw_capacity[cpu]);
In init_cpu_capacity_callback():

    int cpu;
    ...
    pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
             ...);
    ...
    for_each_cpu(cpu, policy->related_cpus) {
            raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
                                policy->cpuinfo.max_freq / 1000UL;
            capacity_scale = max(raw_capacity[cpu], capacity_scale);
    }
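Worked example with hypothetical figures: if both CPUs are still at the default scale of 1024 and cpufreq reports maximum frequencies of 1800000 kHz and 2400000 kHz, the raw capacities become 1024 * 1800000 / 1000 = 1843200 and 1024 * 2400000 / 1000 = 2457600; topology_normalize_cpu_scale() then maps them to (1843200 << 10) / 2457600 = 768 and 1024.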
In register_cpufreq_notifier(), from the comment explaining the cpufreq dependency:

     * on ACPI-based systems we need to use the default cpu capacity
     * until we have the necessary code to parse the cpu capacity, so
     ...
In get_cpu_for_node():

    int cpu;

    cpu_node = of_parse_phandle(node, "cpu", 0);
    ...
    cpu = of_cpu_node_to_id(cpu_node);
    if (cpu >= 0)
            topology_parse_cpu_capacity(cpu_node, cpu);
    else
            pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
    ...
    return cpu;
In parse_core():

    int cpu;
    ...
    cpu = get_cpu_for_node(t);
    if (cpu >= 0) {
            cpu_topology[cpu].package_id = package_id;
            cpu_topology[cpu].core_id = core_id;
            cpu_topology[cpu].thread_id = i;
    } else {
            pr_err("%pOF: Can't get CPU for thread\n", t);
            ...
    }
    ...
    cpu = get_cpu_for_node(core);
    if (cpu >= 0) {
            if (!leaf) {
                    pr_err("%pOF: Core has both threads and CPU\n", core);
                    ...
            }
            cpu_topology[cpu].package_id = package_id;
            cpu_topology[cpu].core_id = core_id;
    } else if (leaf) {
            pr_err("%pOF: Can't get CPU for leaf core\n", core);
            ...
    }
In parse_cluster():

    pr_err("%pOF: cpu-map children should be clusters\n",
           c);
In parse_dt_topology():

    int cpu;
    ...
    pr_err("No CPU information found in DT\n");
    ...
    /*
     * When topology is provided cpu-map is essentially a root
     * cluster with restricted subnodes.
     */
    map = of_get_child_by_name(cn, "cpu-map");
    ...
    for_each_possible_cpu(cpu)
            if (cpu_topology[cpu].package_id == -1)
                    ret = -EINVAL;
Above the cpu_topology[] array definition:

    /*
     * cpu topology table
     */
In cpu_coregroup_mask():

    const struct cpumask *cpu_coregroup_mask(int cpu)
    {
            const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

            /* pick the smallest of the NUMA, package and LLC masks */
            if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
                    core_mask = &cpu_topology[cpu].core_sibling;
            }
            if (cpu_topology[cpu].llc_id != -1) {
                    if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
                            core_mask = &cpu_topology[cpu].llc_sibling;
            }

            return core_mask;
    }
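A self-contained sketch of that narrowing step, using plain 64-bit integers in place of struct cpumask (all mask values and the subset() helper are hypothetical): the function keeps shrinking the mask as long as the smaller sibling set is fully contained in the current one.

    #include <stdint.h>
    #include <stdio.h>

    /* subset(a, b): every CPU set in a is also set in b */
    static int subset(uint64_t a, uint64_t b)
    {
            return (a & ~b) == 0;
    }

    int main(void)
    {
            uint64_t numa_mask    = 0xff; /* CPUs 0-7: same NUMA node */
            uint64_t core_sibling = 0x0f; /* CPUs 0-3: same package */
            uint64_t llc_sibling  = 0x03; /* CPUs 0-1: same LLC */
            uint64_t mask = numa_mask;

            /* prefer the smallest mask that still covers the siblings */
            if (subset(core_sibling, mask))
                    mask = core_sibling;
            if (subset(llc_sibling, mask))
                    mask = llc_sibling;

            printf("coregroup mask: %#llx\n", (unsigned long long)mask); /* 0x3 */
            return 0;
    }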
In update_siblings_masks():

    int cpu;
    ...
    for_each_online_cpu(cpu) {
            cpu_topo = &cpu_topology[cpu];

            if (cpuid_topo->llc_id == cpu_topo->llc_id) {
                    cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
                    ...
            }
            ...
            cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
            ...
            cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
    }
In clear_cpu_topology():

    static void clear_cpu_topology(int cpu)
    {
            struct cpu_topology *cpu_topo = &cpu_topology[cpu];
            cpumask_clear(&cpu_topo->llc_sibling);
            cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
            cpumask_clear(&cpu_topo->core_sibling);
            cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
            cpumask_clear(&cpu_topo->thread_sibling);
            cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
    }
In reset_cpu_topology():

    unsigned int cpu;

    for_each_possible_cpu(cpu) {
            struct cpu_topology *cpu_topo = &cpu_topology[cpu];

            ...     /* thread_id, core_id, package_id, llc_id reset to -1 */
            clear_cpu_topology(cpu);
    }
In remove_cpu_topology():

    void remove_cpu_topology(unsigned int cpu)
    {
            int sibling;

            for_each_cpu(sibling, topology_core_cpumask(cpu))
                    cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
            for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                    cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
            for_each_cpu(sibling, topology_llc_cpumask(cpu))
                    cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

            clear_cpu_topology(cpu);
    }
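To close the loop, a toy model of the sibling bookkeeping across CPU hotplug, with plain bitmasks standing in for struct cpumask (the four-CPU package layout and both helper names are hypothetical): add_cpu() mirrors what update_siblings_masks() does for an incoming CPU, and remove_cpu() mirrors remove_cpu_topology() followed by clear_cpu_topology().

    #include <stdint.h>
    #include <stdio.h>

    #define NR 4

    static uint64_t core_sibling[NR];       /* 0 == CPU offline */

    static void add_cpu(int cpu, const int package[])
    {
            for (int other = 0; other < NR; other++) {
                    if (!core_sibling[other] && other != cpu)
                            continue;               /* other is offline */
                    if (package[other] != package[cpu])
                            continue;               /* different package */
                    core_sibling[other] |= 1ull << cpu;   /* mark in sibling */
                    core_sibling[cpu]   |= 1ull << other; /* and vice versa */
            }
            core_sibling[cpu] |= 1ull << cpu;       /* always own sibling */
    }

    static void remove_cpu(int cpu)
    {
            for (int other = 0; other < NR; other++)
                    core_sibling[other] &= ~(1ull << cpu); /* drop everywhere */
            core_sibling[cpu] = 1ull << cpu;        /* reset to self only */
    }

    int main(void)
    {
            const int package[NR] = { 0, 0, 1, 1 };

            for (int cpu = 0; cpu < NR; cpu++)
                    add_cpu(cpu, package);
            remove_cpu(1);                          /* hot-unplug CPU1 */
            for (int cpu = 0; cpu < NR; cpu++)      /* 0x1 0x2 0xc 0xc */
                    printf("CPU%d core_sibling=%#llx\n", cpu,
                           (unsigned long long)core_sibling[cpu]);
            return 0;
    }

After removing CPU1, its bit disappears from CPU0's mask and CPU1 is reset to just itself, while the CPU2/CPU3 pair in the other package is untouched.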