// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

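/*
 * Frequency invariance is available when either cpufreq can provide it or
 * every online CPU has a registered counter-based scale-freq source.
 */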
bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

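/*
 * Register @data as the counter-based source of frequency-scale updates for
 * @cpus. An existing ARCH-provided source is never overridden; any other
 * source is replaced.
 */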
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

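/*
 * Unregister the scale-freq source for @cpus. Only per-CPU entries whose
 * current source matches @source are cleared.
 */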
void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

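/*
 * Called from the scheduler tick; preemption is disabled there, which is
 * what makes rcu_dereference_sched() sufficient below.
 */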
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

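/*
 * Worked example of the scale computation below: cur_freq = 1200000 kHz on
 * a CPU with max_freq = 2400000 kHz gives
 * scale = (1200000 << 10) / 2400000 = 512, i.e. half of
 * SCHED_CAPACITY_SCALE.
 */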
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

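/*
 * Record the capacity currently lost to thermal throttling for @cpus; the
 * scheduler reads this value on hot paths, hence the WRITE_ONCE().
 */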
void topology_set_thermal_pressure(const struct cpumask *cpus,
				   unsigned long th_pressure)
{
	int cpu;

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

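/* Expose each CPU's capacity as /sys/devices/system/cpu/cpuN/cpu_capacity */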
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

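/*
 * Normalisation example (raw capacity-dmips-mhz * freq_factor in MHz):
 * a big CPU with 1024 * 2000 = 2048000 and a little CPU with
 * 446 * 1500 = 669000 yield capacity_scale = 2048000, so the little CPU
 * ends up with 669000 * 1024 / 2048000 = 334 and the big CPU with 1024.
 */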
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

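/*
 * Parse the optional "capacity-dmips-mhz" property of @cpu_node into
 * raw_capacity[cpu]. Returns true if a value was found. A single CPU node
 * missing the property invalidates the whole table, as partial capacity
 * information would be misleading.
 */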
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot CPU capacities.
		 * For non-clk CPU DVFS mechanisms there is no way to get the
		 * frequency value at this point, so assume the CPUs are running
		 * at the same frequency (by keeping the initial freq_factor
		 * value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

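/*
 * cpufreq policy notifier: once every possible CPU has been covered by a
 * policy, scale each CPU's raw capacity by its policy's maximum frequency
 * and finish the normalisation.
 */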
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * on ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 *     there is no possible logical CPU in the kernel to match. This happens
 *     when CONFIG_NR_CPUS is configured to be smaller than the number of
 *     CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is :%*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

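/*
 * A core node either contains threadN children (SMT, in which case the
 * core is not a leaf) or maps directly to one CPU through its "cpu"
 * phandle; describing both at once is rejected as invalid.
 */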
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

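/*
 * Example of the cpu-map layout handled here (per the DT cpu-topology
 * binding; nesting information is ignored and leaf clusters are presented
 * to the scheduler as a flat list):
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 {
 *				thread0 { cpu = <&cpu2>; };
 *				thread1 { cpu = <&cpu3>; };
 *			};
 *		};
 *	};
 */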
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

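/*
 * Mask used for the MC scheduling domain level: the smallest of the NUMA
 * node span, the package siblings and the LLC siblings that still contains
 * the CPU.
 */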
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

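/*
 * Weak default for architectures that do not provide an ACPI topology
 * parser (arm64, for instance, overrides this with a PPTT-based one).
 */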
__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif