/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H

#define CPUID_ID 0
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
#define CPUID_REVIDR 6

#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0 0x40
#define CPUID_EXT_PFR1 0x44
#define CPUID_EXT_DFR0 0x48
#define CPUID_EXT_AFR0 0x4c
#define CPUID_EXT_MMFR0 0x50
#define CPUID_EXT_MMFR1 0x54
#define CPUID_EXT_MMFR2 0x58
#define CPUID_EXT_MMFR3 0x5c
#define CPUID_EXT_ISAR0 0x60
#define CPUID_EXT_ISAR1 0x64
#define CPUID_EXT_ISAR2 0x68
#define CPUID_EXT_ISAR3 0x6c
#define CPUID_EXT_ISAR4 0x70
#define CPUID_EXT_ISAR5 0x74
#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
#define CPUID_EXT_DFR0 "c1, 2"
#define CPUID_EXT_AFR0 "c1, 3"
#define CPUID_EXT_MMFR0 "c1, 4"
#define CPUID_EXT_MMFR1 "c1, 5"
#define CPUID_EXT_MMFR2 "c1, 6"
#define CPUID_EXT_MMFR3 "c1, 7"
#define CPUID_EXT_ISAR0 "c2, 0"
#define CPUID_EXT_ISAR1 "c2, 1"
#define CPUID_EXT_ISAR2 "c2, 2"
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
#endif

#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)

#define MPIDR_MT_BITMASK (0x1 << 24)

#define MPIDR_HWID_BITMASK 0xFFFFFF

#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)

#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)

#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
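
/*
 * Worked example (illustrative value only): each affinity level is an
 * 8-bit field, so for mpidr = 0x80000102
 * MPIDR_AFFINITY_LEVEL(mpidr, 0) == 0x02, level 1 == 0x01 and
 * level 2 == 0x00.
 */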

#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_DEC 0x44
#define ARM_CPU_IMP_INTEL 0x69

/* ARM implemented processors */
#define ARM_CPU_PART_ARM1136 0x4100b360
#define ARM_CPU_PART_ARM1156 0x4100b560
#define ARM_CPU_PART_ARM1176 0x4100b760
#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
#define ARM_CPU_PART_CORTEX_A8 0x4100c080
#define ARM_CPU_PART_CORTEX_A9 0x4100c090
#define ARM_CPU_PART_CORTEX_A5 0x4100c050
#define ARM_CPU_PART_CORTEX_A7 0x4100c070
#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
#define ARM_CPU_PART_CORTEX_A53 0x4100d030
#define ARM_CPU_PART_CORTEX_A57 0x4100d070
#define ARM_CPU_PART_CORTEX_A72 0x4100d080
#define ARM_CPU_PART_CORTEX_A73 0x4100d090
#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
#define ARM_CPU_PART_MASK 0xff00fff0
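
/*
 * The MIDR implementer field lives in bits [31:24] and the primary part
 * number in bits [15:4], so ARM_CPU_PART_MASK keeps both fields; that is
 * why the ARM_CPU_PART_* values above carry the implementer byte (0x41
 * for ARM) as well as the part number.
 */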

/* Broadcom implemented processors */
#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
#define ARM_CPU_PART_BRAHMA_B53 0x42001000

/* DEC implemented cores */
#define ARM_CPU_PART_SA1100 0x4400a110

/* Intel implemented cores */
#define ARM_CPU_PART_SA1110 0x6900b110
#define ARM_CPU_REV_SA1110_A0 0
#define ARM_CPU_REV_SA1110_B0 4
#define ARM_CPU_REV_SA1110_B1 5
#define ARM_CPU_REV_SA1110_B2 6
#define ARM_CPU_REV_SA1110_B4 8

#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000

/* Qualcomm implemented cores */
#define ARM_CPU_PART_SCORPION 0x510002d0

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/kernel.h>

extern unsigned int processor_id;

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
({ \
        unsigned int __val; \
        asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
            : "=r" (__val) \
            : \
            : "cc"); \
        __val; \
})
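
/*
 * The CPUID_* values above end up as the opc2 field of the mrc
 * instruction, e.g. read_cpuid(CPUID_MPIDR) expands to
 * "mrc p15, 0, %0, c0, c0, 5" and reads the MPIDR.
 */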

/*
 * The memory clobber prevents gcc 4.5 from reordering the mrc before
 * any is_smp() tests, which can cause undefined instruction aborts on
 * ARM1136 r0 due to the missing extended CP15 registers.
 */
#define read_cpuid_ext(ext_reg) \
({ \
        unsigned int __val; \
        asm("mrc p15, 0, %0, c0, " ext_reg \
            : "=r" (__val) \
            : \
            : "memory"); \
        __val; \
})
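
/*
 * Example: read_cpuid_ext(CPUID_EXT_MMFR0) expands to
 * "mrc p15, 0, %0, c0, c1, 4", i.e. it reads ID_MMFR0.
 */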

#elif defined(CONFIG_CPU_V7M)

#include <asm/io.h>
#include <asm/v7m.h>

#define read_cpuid(reg) \
({ \
        WARN_ON_ONCE(1); \
        0; \
})

static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
{
        return readl(BASEADDR_V7M_SCB + offset);
}

#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */

/*
 * read_cpuid and read_cpuid_ext should only ever be called on machines that
 * have cp15 so warn on other usages.
 */
#define read_cpuid(reg) \
({ \
        WARN_ON_ONCE(1); \
        0; \
})

#define read_cpuid_ext(reg) read_cpuid(reg)

#endif /* ifdef CONFIG_CPU_CP15 / else */

#ifdef CONFIG_CPU_CP15
/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or calling read_cpuid() directly.
 */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
        return read_cpuid(CPUID_ID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
        return read_cpuid(CPUID_CACHETYPE);
}

static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
{
        return read_cpuid(CPUID_MPUIR);
}

#elif defined(CONFIG_CPU_V7M)

static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
        return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
        return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
}

static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
{
        return readl(BASEADDR_V7M_SCB + MPU_TYPE);
}

#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */

static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
        return processor_id;
}

#endif /* ifdef CONFIG_CPU_CP15 / else */

static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
{
        return (read_cpuid_id() & 0xFF000000) >> 24;
}

static inline unsigned int __attribute_const__ read_cpuid_revision(void)
{
        return read_cpuid_id() & 0x0000000f;
}

/*
 * The CPU part number is meaningless without referring to the CPU
 * implementer: implementers are free to define their own part numbers
 * which are permitted to clash with other implementers' part numbers.
 */
static inline unsigned int __attribute_const__ read_cpuid_part(void)
{
        return read_cpuid_id() & ARM_CPU_PART_MASK;
}
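
/*
 * Typical (illustrative) usage: compare against a part constant that
 * already embeds the implementer, e.g.
 *
 *        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15)
 *                ...
 */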

static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
{
        return read_cpuid_id() & 0xFFF0;
}

static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
{
        return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
}

static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
{
        return read_cpuid(CPUID_TCM);
}

static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
{
        return read_cpuid(CPUID_MPIDR);
}

/* StrongARM-11x0 CPUs */
#define cpu_is_sa1100() (read_cpuid_part() == ARM_CPU_PART_SA1100)
#define cpu_is_sa1110() (read_cpuid_part() == ARM_CPU_PART_SA1110)

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2 cache)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3() 0
#else
static inline int cpu_is_xsc3(void)
{
        unsigned int id;
        id = read_cpuid_id() & 0xffffe000;
        /* This covers both the Intel and the Marvell IDs */
        if ((id == 0x69056000) || (id == 0x56056000))
                return 1;

        return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) && \
    !defined(CONFIG_CPU_MOHAWK)
#define cpu_is_xscale_family() 0
#else
static inline int cpu_is_xscale_family(void)
{
        unsigned int id;
        id = read_cpuid_id() & 0xffffe000;

        switch (id) {
        case 0x69052000: /* Intel XScale 1 */
        case 0x69054000: /* Intel XScale 2 */
        case 0x69056000: /* Intel XScale 3 */
        case 0x56056000: /* Marvell XScale 3 */
        case 0x56158000: /* Marvell Mohawk */
                return 1;
        }

        return 0;
}
#endif

/*
 * Marvell's PJ4 and PJ4B cores are based on ARMv7, but require a
 * special sequence for enabling coprocessors.  For this reason, we
 * need a way to distinguish them.
 */
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
static inline int cpu_is_pj4(void)
{
        unsigned int id;

        id = read_cpuid_id();
        if ((id & 0xff0fff00) == 0x560f5800)
                return 1;

        return 0;
}
#else
#define cpu_is_pj4() 0
#endif

static inline int __attribute_const__ cpuid_feature_extract_field(u32 features, int field)
{
        int feature = (features >> field) & 15;

        /* feature registers are signed values */
        if (feature > 7)
                feature -= 16;

        return feature;
}

#define cpuid_feature_extract(reg, field) \
        cpuid_feature_extract_field(read_cpuid_ext(reg), field)
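
/*
 * Example (illustrative only, field offsets depend on the register being
 * decoded): the 4-bit field is sign-extended, so a raw value of 0xf is
 * returned as -1 and 0x1 as 1, which lets callers use plain signed
 * comparisons such as cpuid_feature_extract(CPUID_EXT_ISAR0, 24) > 0.
 */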

#endif /* __ASSEMBLY__ */

#endif