/*
 * arch/parisc/kernel/topology.c
 *
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 *
 * based on arch/arm/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>

#include <asm/topology.h>

 /*
  * cpu topology table
  */
struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
EXPORT_SYMBOL_GPL(cpu_topology);

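/*
 * Return the mask of all CPUs which share a physical package (socket)
 * with @cpu; used as the MC level mask in parisc_mc_topology[] below.
 */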
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

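/*
 * Update the core and thread sibling masks of @cpuid and of every other
 * CPU in the same socket, so that both sides of each relation stay in sync.
 */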
static void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}

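/* Set once two logical CPUs have been found to share one physical CPU */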
static int dualcores_found __initdata;

/*
 * store_cpu_topology is called at boot when only one cpu is running,
 * and with the mutex cpu_hotplug.lock held once several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid];
	struct cpuinfo_parisc *p;
	int max_socket = -1;
	unsigned long cpu;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	/* create cpu topology mapping */
	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = 0;

	p = &per_cpu(cpu_data, cpuid);
	for_each_online_cpu(cpu) {
		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

		if (cpu == cpuid) /* ignore current cpu */
			continue;

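		/*
		 * A matching, non-zero cpu_loc means both logical CPUs live
		 * in the same physical CPU, i.e. this is a dual-core chip.
		 */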
		if (cpuinfo->cpu_loc == p->cpu_loc) {
			cpuid_topo->core_id = cpu_topology[cpu].core_id;
			if (p->cpu_loc) {
				cpuid_topo->core_id++;
				cpuid_topo->socket_id = cpu_topology[cpu].socket_id;
				dualcores_found = 1;
				continue;
			}
		}

		if (cpuid_topo->socket_id == -1)
			max_socket = max(max_socket, cpu_topology[cpu].socket_id);
	}

	if (cpuid_topo->socket_id == -1)
		cpuid_topo->socket_id = max_socket + 1;

	update_siblings_masks(cpuid);

	pr_info("CPU%u: thread %d, cpu %d, socket %d\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id);
}

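/*
 * Scheduler topology used when dual-cores were found: an MC level built
 * from cpu_coregroup_mask() (with CONFIG_SCHED_MC), then the DIE level.
 */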
static struct sched_domain_topology_level parisc_mc_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif

	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and capacity */
	for_each_possible_cpu(cpu) {
		struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
	}
	smp_wmb();

	/* Set scheduler topology descriptor */
	if (dualcores_found)
		set_sched_topology(parisc_mc_topology);
}