// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * A note for the weary kernel hacker: the code here is confusing and hard to
 * follow! That's partly because it's solving a nasty problem, but also because
 * there's a little bit of over-abstraction that tends to obscure what's going
 * on behind a maze of helper functions and macros.
 *
 * The basic problem is that hardware folks have started gluing together CPUs
 * with distinct architectural features; in some cases even creating SoCs where
 * user-visible instructions are available only on a subset of the available
 * cores. We try to address this by snapshotting the feature registers of the
 * boot CPU and comparing these with the feature registers of each secondary
 * CPU when bringing them up. If there is a mismatch, then we update the
 * snapshot state to indicate the lowest common denominator of the feature,
 * known as the "safe" value. This snapshot state can be queried to view the
 * "sanitised" value of a feature register.
 *
 * The sanitised register values are used to decide which capabilities we
 * have in the system. These may be in the form of traditional "hwcaps"
 * advertised to userspace or internal "cpucaps" which are used to configure
 * things like alternative patching and static keys. While a feature mismatch
 * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
 * may prevent a CPU from being onlined at all.
 *
 * Some implementation details worth remembering:
 *
 * - Mismatched features are *always* sanitised to a "safe" value, which
 *   usually indicates that the feature is not supported.
 *
 * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
 *   warning when onlining an offending CPU and the kernel will be tainted
 *   with TAINT_CPU_OUT_OF_SPEC.
 *
 * - Features marked as FTR_VISIBLE have their sanitised value visible to
 *   userspace. FTR_VISIBLE features in registers that are only visible
 *   to EL0 by trapping *must* have a corresponding HWCAP so that late
 *   onlining of CPUs cannot lead to features disappearing at runtime.
 *
 * - A "feature" is typically a 4-bit register field. A "capability" is the
 *   high-level description derived from the sanitised field value.
 *
 * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
 *   scheme for fields in ID registers") to understand when feature fields
 *   may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly).
 *
 * - KVM exposes its own view of the feature registers to guest operating
 *   systems regardless of FTR_VISIBLE. This is typically driven from the
 *   sanitised register values to allow virtual CPUs to be migrated between
 *   arbitrary physical CPUs, but some features not present on the host are
 *   also advertised and emulated. Look at sys_reg_descs[] for the gory
 *   details.
 *
 * - If the arm64_ftr_bits[] for a register has a missing field, then this
 *   field is treated as STRICT RES0, including for read_sanitised_ftr_reg().
 *   This is stronger than FTR_HIDDEN and can be used to hide features from
 *   KVM guests.
 */
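/*
 * A worked example of the sanitisation described above (illustrative
 * values, not taken from any particular SoC): suppose the boot CPU
 * reports ID_AA64ISAR0_EL1.AES = 2 (AES + PMULL) but a secondary CPU
 * reports AES = 1 (AES only). The field is FTR_LOWER_SAFE, so the
 * snapshot is updated to the lower value, 1, and a subsequent
 * read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1) sees AES = 1 for the
 * whole system.
 */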

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
static unsigned long elf_hwcap __read_mostly;

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];

/* We also need a bit for ARM64_CB_PATCH */
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);

/*
 * Flag to indicate if we have computed the system-wide capabilities
 * based on the boot-time active CPUs. This is used to determine if a
 * new booting CPU should go through the verification process to make
 * sure that it supports the system capabilities, without using a
 * hotplug notifier. This is also used to decide if we can use the
 * fast path for checking constant CPU caps.
 */
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);
static inline void finalize_system_capabilities(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

void dump_cpu_features(void)
{
	/* file-wide pr_fmt adds "CPU features: " prefix */
	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
}

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);

#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.visible = VISIBLE,			\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}
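
/*
 * For illustration, ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT,
 * FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0) expands to roughly:
 *
 *	{
 *		.sign		= FTR_UNSIGNED,
 *		.visible	= FTR_VISIBLE,
 *		.strict		= FTR_STRICT,
 *		.type		= FTR_LOWER_SAFE,
 *		.shift		= ID_AA64ISAR0_AES_SHIFT,
 *		.width		= 4,
 *		.safe_val	= 0,
 *	}
 *
 * i.e. a 4-bit unsigned field whose mismatches are sanitised to the
 * lowest value seen, with 0 ("not implemented") as the fallback.
 */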

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);

static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);

static bool __system_matches_cap(unsigned int n);

/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documentation of the CPU feature register ABI.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
	/*
	 * Page size not being supported at Stage-2 is not fatal. You
	 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
	 * your favourite nesting hypervisor.
	 *
	 * There is a small corner case where the hypervisor explicitly
	 * advertises a given granule size at Stage-2 (value 2) on some
	 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
	 * vCPUs. Although this is not forbidden by the architecture, it
	 * indicates that the hypervisor is being silly (or buggy).
	 *
	 * We make no effort to cope with this and pretend that if these
	 * fields are inconsistent across vCPUs, then it isn't worth
	 * trying to bring KVM up.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
	/*
	 * We already refuse to boot CPUs that don't support our configured
	 * page size, so we can only detect mismatches for a page size other
	 * than the one we're currently using. Unfortunately, SoCs like this
	 * exist in the wild so, even though we don't like it, we'll have to go
	 * along with it and treat them as non-strict.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),

	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are
	 * mapped within the minimum PARange of all CPUs.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - VIPT.
	 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_DZP_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),

	/*
	 * SpecSEI = 1 indicates that the PE might generate an SError on an
	 * external abort on speculative read. It is safer to assume that an
	 * SError might be generated than that it will not be. Hence it has
	 * been classified as FTR_HIGHER_SAFE.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar6[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	/* [31:28] TraceFilt */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_PERFMON_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr1[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_zcr[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),	/* LEN */
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[1-3], id_mmfr[1-3], mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/* Table for a single 32bit feature value */
static const struct arm64_ftr_bits ftr_single32[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_raz[] = {
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table) {		\
	.sys_id = id,				\
	.reg = &(struct arm64_ftr_reg){		\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}}
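
/*
 * As a sketch, ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0)
 * produces an entry equivalent to:
 *
 *	{
 *		.sys_id = SYS_ID_AA64ISAR0_EL1,
 *		.reg = &(struct arm64_ftr_reg){
 *			.name = "SYS_ID_AA64ISAR0_EL1",
 *			.ftr_bits = &ftr_id_aa64isar0[0],
 *		},
 *	}
 *
 * The compound literal gives each register its own mutable
 * arm64_ftr_reg (sys_val, strict_mask etc. are filled in at boot by
 * init_cpu_ftr_reg()), while the table entry itself stays const.
 */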

static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
	ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
	ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
	ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
	ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 0, CRn = 1, CRm = 2 */
	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
 * its sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, the matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

/*
 * get_arm64_ftr_reg - Looks up a feature register entry using
 * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn().
 *
 * returns - Upon success, the matching ftr_reg entry for id.
 *         - NULL on failure, but with a WARN_ON().
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	struct arm64_ftr_reg *reg;

	reg = get_arm64_ftr_reg_nowarn(sys_id);

	/*
	 * Requesting a non-existent register is an error. Warn
	 * and let the caller handle it.
	 */
	WARN_ON(!reg);
	return reg;
}

static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}
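
/*
 * Example (illustrative numbers): for a 4-bit field at shift 8,
 * arm64_ftr_mask() is GENMASK_ULL(11, 8) == 0xf00. Calling
 * arm64_ftr_set_value(ftrp, 0xffff, 0x3) clears bits [11:8] and
 * inserts 0x3 there, yielding 0xf3ff.
 */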

static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;
		fallthrough;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}
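
/*
 * A quick sketch of the policies above, assuming a current value of 1
 * and a new value of 2:
 *
 *	FTR_EXACT               -> safe_val (the values must match)
 *	FTR_LOWER_SAFE          -> 1 (take the minimum)
 *	FTR_HIGHER_SAFE         -> 2 (take the maximum)
 *	FTR_HIGHER_OR_ZERO_SAFE -> 2 (as above, unless either value is 0)
 */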

static void __init sort_ftr_regs(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
		const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
		const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
		unsigned int j = 0;

		/*
		 * Features here must be sorted in descending order with respect
		 * to their shift values and should not overlap with each other.
		 */
		for (; ftr_bits->width != 0; ftr_bits++, j++) {
			unsigned int width = ftr_reg->ftr_bits[j].width;
			unsigned int shift = ftr_reg->ftr_bits[j].shift;
			unsigned int prev_shift;

			WARN((shift + width) > 64,
				"%s has invalid feature at shift %d\n",
				ftr_reg->name, shift);

			/*
			 * Skip the first feature. There is nothing to
			 * compare against for now.
			 */
			if (j == 0)
				continue;

			prev_shift = ftr_reg->ftr_bits[j - 1].shift;
			WARN((shift + width) > prev_shift,
				"%s has feature overlap at shift %d\n",
				ftr_reg->name, shift);
		}

		/*
		 * Skip the first register. There is nothing to
		 * compare against for now.
		 */
		if (i == 0)
			continue;
		/*
		 * Registers here must be sorted in ascending order with respect
		 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
		 * to work correctly.
		 */
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
	}
}

/*
 * Initialise the CPU feature register from the boot CPU values.
 * Also initialises the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	u64 user_mask = 0;
	u64 valid_mask = 0;

	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	if (!reg)
		return;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		u64 ftr_mask = arm64_ftr_mask(ftrp);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);

		valid_mask |= ftr_mask;
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
		if (ftrp->visible)
			user_mask |= ftr_mask;
		else
			reg->user_val = arm64_ftr_set_value(ftrp,
							    reg->user_val,
							    ftrp->safe_val);
	}

	val &= valid_mask;

	reg->sys_val = val;
	reg->strict_mask = strict_mask;
	reg->user_mask = user_mask;
}
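
/*
 * Illustration of the masks built above, for a hypothetical register
 * with one visible strict field at [3:0] and one hidden non-strict
 * field at [7:4]: strict_mask ends up covering every bit except [7:4]
 * (so the strict field and all uncovered RES0 bits are compared across
 * CPUs), user_mask contains just [3:0], and the hidden field is
 * reported to userspace as its safe value via user_val instead.
 */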

extern const struct arm64_cpu_capabilities arm64_errata[];
static const struct arm64_cpu_capabilities arm64_features[];

static void __init
init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		if (WARN(caps->capability >= ARM64_NCAPS,
			"Invalid capability %d\n", caps->capability))
			continue;
		if (WARN(cpu_hwcaps_ptrs[caps->capability],
			"Duplicate entry for capability %d\n",
			caps->capability))
			continue;
		cpu_hwcaps_ptrs[caps->capability] = caps;
	}
}

static void __init init_cpu_hwcaps_indirect_list(void)
{
	init_cpu_hwcaps_indirect_list_from_array(arm64_features);
	init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
}

static void __init setup_boot_cpu_capabilities(void);

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure they are sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
		init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
		sve_init_vq_map();
	}

	/*
	 * Initialize the indirect array of CPU hwcaps capabilities pointers
	 * before we handle the boot CPU below.
	 */
	init_cpu_hwcaps_indirect_list();

	/*
	 * Detect and enable early CPU capabilities based on the boot CPU,
	 * after we have initialised the CPU feature infrastructure.
	 */
	setup_boot_cpu_capabilities();
}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}

static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	if (!regp)
		return 0;

	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}

static void relax_cpu_ftr_reg(u32 sys_id, int field)
{
	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	if (!regp)
		return;

	for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
		if (ftrp->shift == field) {
			regp->strict_mask &= ~arm64_ftr_mask(ftrp);
			break;
		}
	}

	/* Bogus field? */
	WARN_ON(!ftrp->width);
}

static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
				     struct cpuinfo_arm64 *boot)
{
	int taint = 0;
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/*
	 * If we don't have AArch32 at all then skip the checks entirely
	 * as the register values may be UNKNOWN and we're not going to be
	 * using them for anything.
	 */
	if (!id_aa64pfr0_32bit_el0(pfr0))
		return taint;

	/*
	 * If we don't have AArch32 at EL1, then relax the strictness of
	 * EL1-dependent register fields to avoid spurious sanity check fails.
	 */
	if (!id_aa64pfr0_32bit_el1(pfr0)) {
		relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
	}

	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
				      info->reg_id_dfr1, boot->reg_id_dfr1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);
	taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
				      info->reg_id_isar6, boot->reg_id_isar6);

	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
				      info->reg_id_mmfr4, boot->reg_id_mmfr4);
	taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
				      info->reg_id_mmfr5, boot->reg_id_mmfr5);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
				      info->reg_id_pfr2, boot->reg_id_pfr2);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	return taint;
}

/*
 * Update the system-wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any unexpected variations from the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical
	 * instruction-set-wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
					info->reg_zcr, boot->reg_zcr);

		/* Probe vector lengths, unless we already gave up on SVE */
		if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
		    !system_capabilities_finalized())
			sve_update_vq_map();
	}

	/*
	 * This relies on a sanitised view of the AArch64 ID registers
	 * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
	 */
	taint |= update_32bit_cpu_features(cpu, info, boot);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
}

u64 read_sanitised_ftr_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	if (!regp)
		return 0;
	return regp->sys_val;
}
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
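
/*
 * Typical usage (a sketch, mirroring how callers elsewhere use this
 * interface): read the system-wide safe value and then extract the
 * field of interest, rather than poking at the raw register on one
 * CPU. For instance:
 *
 *	u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 *	unsigned int vmidbits = cpuid_feature_extract_unsigned_field(
 *					mmfr1, ID_AA64MMFR1_VMIDBITS_SHIFT);
 */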

#define read_sysreg_case(r)	\
	case r:		return read_sysreg_s(r)

/*
 * __read_sysreg_by_encoding() - Used by a STARTING CPU before cpuinfo
 * is populated. Read the system register on the current CPU.
 */
static u64 __read_sysreg_by_encoding(u32 sys_id)
{
	switch (sys_id) {
	read_sysreg_case(SYS_ID_PFR0_EL1);
	read_sysreg_case(SYS_ID_PFR1_EL1);
	read_sysreg_case(SYS_ID_PFR2_EL1);
	read_sysreg_case(SYS_ID_DFR0_EL1);
	read_sysreg_case(SYS_ID_DFR1_EL1);
	read_sysreg_case(SYS_ID_MMFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR1_EL1);
	read_sysreg_case(SYS_ID_MMFR2_EL1);
	read_sysreg_case(SYS_ID_MMFR3_EL1);
	read_sysreg_case(SYS_ID_MMFR4_EL1);
	read_sysreg_case(SYS_ID_MMFR5_EL1);
	read_sysreg_case(SYS_ID_ISAR0_EL1);
	read_sysreg_case(SYS_ID_ISAR1_EL1);
	read_sysreg_case(SYS_ID_ISAR2_EL1);
	read_sysreg_case(SYS_ID_ISAR3_EL1);
	read_sysreg_case(SYS_ID_ISAR4_EL1);
	read_sysreg_case(SYS_ID_ISAR5_EL1);
	read_sysreg_case(SYS_ID_ISAR6_EL1);
	read_sysreg_case(SYS_MVFR0_EL1);
	read_sysreg_case(SYS_MVFR1_EL1);
	read_sysreg_case(SYS_MVFR2_EL1);

	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
	read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);

	read_sysreg_case(SYS_CNTFRQ_EL0);
	read_sysreg_case(SYS_CTR_EL0);
	read_sysreg_case(SYS_DCZID_EL0);

	default:
		BUG();
		return 0;
	}
}

#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
}

static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 val;

	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
	if (scope == SCOPE_SYSTEM)
		val = read_sanitised_ftr_reg(entry->sys_reg);
	else
		val = __read_sysreg_by_encoding(entry->sys_reg);

	return feature_matches(val, entry);
}
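
/*
 * A hypothetical arm64_features[] entry wired up to the helper above
 * (the field names follow struct arm64_cpu_capabilities; the
 * capability itself is purely illustrative, not a real cpucap):
 *
 *	{
 *		.desc = "Example feature",
 *		.capability = ARM64_HAS_EXAMPLE,	// hypothetical
 *		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.matches = has_cpuid_feature,
 *		.sys_reg = SYS_ID_AA64ISAR0_EL1,
 *		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
 *		.sign = FTR_UNSIGNED,
 *		.min_field_value = 1,
 *	},
 */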

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();

	/* Cavium ThunderX pass 1.x and 2.x */
	return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
		MIDR_CPU_VAR_REV(0, 0),
		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}

static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_signed_field(pfr0,
					ID_AA64PFR0_FP_SHIFT) < 0;
}

static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 ctr;

	if (scope == SCOPE_SYSTEM)
		ctr = arm64_ftr_reg_ctrel0.sys_val;
	else
		ctr = read_cpuid_effective_cachetype();

	return ctr & BIT(CTR_IDC_SHIFT);
}

static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
{
	/*
	 * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
	 * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
	 * to the CTR_EL0 on this CPU and emulate it with the real/safe
	 * value.
	 */
	if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
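
/*
 * Note on the mechanism above: clearing SCTLR_EL1.UCT makes EL0 reads
 * of CTR_EL0 trap to EL1, where the kernel's trap emulation can hand
 * back the sanitised value, so userspace on this CPU observes IDC
 * behaviour consistent with the system-wide register state.
 */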
1253 
has_cache_dic(const struct arm64_cpu_capabilities * entry,int scope)1254 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
1255 			  int scope)
1256 {
1257 	u64 ctr;
1258 
1259 	if (scope == SCOPE_SYSTEM)
1260 		ctr = arm64_ftr_reg_ctrel0.sys_val;
1261 	else
1262 		ctr = read_cpuid_cachetype();
1263 
1264 	return ctr & BIT(CTR_DIC_SHIFT);
1265 }
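
/*
 * Worked example: IDC and DIC are single bits of CTR_EL0, so with a
 * sanitised CTR_EL0 value both checks above reduce to bit tests
 * (sketch, using the CTR_*_SHIFT definitions already relied on here):
 *
 *	u64 ctr = arm64_ftr_reg_ctrel0.sys_val;
 *	bool idc = ctr & BIT(CTR_IDC_SHIFT); // no D-cache clean to PoU needed
 *	bool dic = ctr & BIT(CTR_DIC_SHIFT); // no I-cache invalidation needed
 */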
1266 
1267 static bool __maybe_unused
1268 has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
1269 {
1270 	/*
1271 	 * Kdump isn't guaranteed to power-off all secondary CPUs, so with
1272 	 * CNP enabled we may share TLB entries with a CPU stuck in the crashed
1273 	 * kernel.
1274 	 */
1275 	if (is_kdump_kernel())
1276 		return false;
1277 
1278 	return has_cpuid_feature(entry, scope);
1279 }
1280 
1281 /*
1282  * This check is triggered during early boot, before the cpufeature
1283  * code is initialised. Checking the status on the local CPU allows the
1284  * boot CPU to detect the need for non-global mappings and thus avoid a
1285  * pagetable rewrite after all the CPUs are booted. The check is run
1286  * again on each individual CPU anyway, so we still reach a consistent
1287  * state once the SMP CPUs are up and can then switch to non-global
1288  * mappings if required.
1289  */
1290 bool kaslr_requires_kpti(void)
1291 {
1292 	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
1293 		return false;
1294 
1295 	/*
1296 	 * E0PD does a similar job to KPTI so can be used instead
1297 	 * where available.
1298 	 */
1299 	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
1300 		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
1301 		if (cpuid_feature_extract_unsigned_field(mmfr2,
1302 						ID_AA64MMFR2_E0PD_SHIFT))
1303 			return false;
1304 	}
1305 
1306 	/*
1307 	 * Systems affected by Cavium erratum 27456 are incompatible
1308 	 * with KPTI.
1309 	 */
1310 	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
1311 		extern const struct midr_range cavium_erratum_27456_cpus[];
1312 
1313 		if (is_midr_in_range_list(read_cpuid_id(),
1314 					  cavium_erratum_27456_cpus))
1315 			return false;
1316 	}
1317 
1318 	return kaslr_offset() > 0;
1319 }
1320 
1321 static bool __meltdown_safe = true;
1322 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
1323 
1324 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
1325 				int scope)
1326 {
1327 	/* List of CPUs that are not vulnerable and don't need KPTI */
1328 	static const struct midr_range kpti_safe_list[] = {
1329 		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
1330 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1331 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
1332 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
1333 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
1334 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1335 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1336 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1337 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1338 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
1339 		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
1340 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
1341 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
1342 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
1343 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
1344 		{ /* sentinel */ }
1345 	};
1346 	char const *str = "kpti command line option";
1347 	bool meltdown_safe;
1348 
1349 	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
1350 
1351 	/* Defer to CPU feature registers */
1352 	if (has_cpuid_feature(entry, scope))
1353 		meltdown_safe = true;
1354 
1355 	if (!meltdown_safe)
1356 		__meltdown_safe = false;
1357 
1358 	/*
1359 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
1360 	 * ThunderX leads to apparent I-cache corruption of kernel text, which
1361 	 * ends as well as you might imagine. Don't even try.
1362 	 */
1363 	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
1364 		str = "ARM64_WORKAROUND_CAVIUM_27456";
1365 		__kpti_forced = -1;
1366 	}
1367 
1368 	/* Useful for KASLR robustness */
1369 	if (kaslr_requires_kpti()) {
1370 		if (!__kpti_forced) {
1371 			str = "KASLR";
1372 			__kpti_forced = 1;
1373 		}
1374 	}
1375 
1376 	if (cpu_mitigations_off() && !__kpti_forced) {
1377 		str = "mitigations=off";
1378 		__kpti_forced = -1;
1379 	}
1380 
1381 	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
1382 		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
1383 		return false;
1384 	}
1385 
1386 	/* Forced? */
1387 	if (__kpti_forced) {
1388 		pr_info_once("kernel page table isolation forced %s by %s\n",
1389 			     __kpti_forced > 0 ? "ON" : "OFF", str);
1390 		return __kpti_forced > 0;
1391 	}
1392 
1393 	return !meltdown_safe;
1394 }
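
/*
 * Summarising the precedence of the checks above:
 *
 *	1. CONFIG_UNMAP_KERNEL_AT_EL0=n		-> KPTI disabled
 *	2. __kpti_forced non-zero (command line, the Cavium 27456
 *	   workaround, KASLR or mitigations=off)
 *						-> honour the forced value
 *	3. otherwise				-> enable KPTI iff this CPU is
 *						   not known to be meltdown-safe
 */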
1395 
1396 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1397 static void
1398 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1399 {
1400 	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
1401 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
1402 	kpti_remap_fn *remap_fn;
1403 
1404 	int cpu = smp_processor_id();
1405 
1406 	/*
1407 	 * We don't need to rewrite the page-tables if either we've done
1408 	 * it already or we have KASLR enabled and therefore have not
1409 	 * created any global mappings at all.
1410 	 */
1411 	if (arm64_use_ng_mappings)
1412 		return;
1413 
1414 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
1415 
1416 	cpu_install_idmap();
1417 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
1418 	cpu_uninstall_idmap();
1419 
1420 	if (!cpu)
1421 		arm64_use_ng_mappings = true;
1422 
1423 	return;
1424 }
1425 #else
1426 static void
1427 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1428 {
1429 }
1430 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
1431 
1432 static int __init parse_kpti(char *str)
1433 {
1434 	bool enabled;
1435 	int ret = strtobool(str, &enabled);
1436 
1437 	if (ret)
1438 		return ret;
1439 
1440 	__kpti_forced = enabled ? 1 : -1;
1441 	return 0;
1442 }
1443 early_param("kpti", parse_kpti);
1444 
1445 #ifdef CONFIG_ARM64_HW_AFDBM
1446 static inline void __cpu_enable_hw_dbm(void)
1447 {
1448 	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
1449 
1450 	write_sysreg(tcr, tcr_el1);
1451 	isb();
1452 	local_flush_tlb_all();
1453 }
1454 
1455 static bool cpu_has_broken_dbm(void)
1456 {
1457 	/* List of CPUs which have broken DBM support. */
1458 	static const struct midr_range cpus[] = {
1459 #ifdef CONFIG_ARM64_ERRATUM_1024718
1460 		MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 - r1p0
1461 		/* Kryo4xx Silver (rdpe => r1p0) */
1462 		MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
1463 #endif
1464 		{},
1465 	};
1466 
1467 	return is_midr_in_range_list(read_cpuid_id(), cpus);
1468 }
1469 
1470 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
1471 {
1472 	return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
1473 	       !cpu_has_broken_dbm();
1474 }
1475 
1476 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
1477 {
1478 	if (cpu_can_use_dbm(cap))
1479 		__cpu_enable_hw_dbm();
1480 }
1481 
1482 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
1483 		       int __unused)
1484 {
1485 	static bool detected = false;
1486 	/*
1487 	 * DBM is a non-conflicting feature, i.e. the kernel can safely
1488 	 * run a mix of CPUs with and without the feature. So we
1489 	 * unconditionally enable the capability to allow any late CPU
1490 	 * to use the feature. We only enable the control bits on a
1491 	 * CPU if it actually supports DBM.
1492 	 *
1493 	 * We have to make sure we print the "feature" detection only
1494 	 * when at least one CPU actually uses it. So check if this CPU
1495 	 * can actually use it and print the message exactly once.
1496 	 *
1497 	 * This is safe as all CPUs (including secondary CPUs - due to the
1498 	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
1499 	 * go through the "matches" check exactly once. Also, if a CPU
1500 	 * matches the criteria, it is guaranteed that the CPU will turn
1501 	 * DBM on, as the capability is unconditionally enabled.
1502 	 */
1503 	if (!detected && cpu_can_use_dbm(cap)) {
1504 		detected = true;
1505 		pr_info("detected: Hardware dirty bit management\n");
1506 	}
1507 
1508 	return true;
1509 }
1510 
1511 #endif
1512 
1513 #ifdef CONFIG_ARM64_AMU_EXTN
1514 
1515 /*
1516  * The "amu_cpus" cpumask only signals that the CPU implementation for the
1517  * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide
1518  * information regarding all the events that it supports. When a CPU bit is
1519  * set in the cpumask, the user of this feature can only rely on the presence
1520  * of the 4 fixed counters for that CPU. It does not guarantee that the
1521  * counters are enabled, or that access to them has been granted by code
1522  * executed at higher exception levels (firmware).
1523  */
1524 static struct cpumask amu_cpus __read_mostly;
1525 
1526 bool cpu_has_amu_feat(int cpu)
1527 {
1528 	return cpumask_test_cpu(cpu, &amu_cpus);
1529 }
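
/*
 * Illustrative use (kernel context): a consumer of the fixed AMU
 * counters would gate its per-CPU accesses on this predicate, e.g.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		if (cpu_has_amu_feat(cpu))
 *			pr_debug("CPU%d: AMU counters present\n", cpu);
 */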
1530 
1531 /* Initialize the use of AMU counters for frequency invariance */
1532 extern void init_cpu_freq_invariance_counters(void);
1533 
1534 static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
1535 {
1536 	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
1537 		pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
1538 			smp_processor_id());
1539 		cpumask_set_cpu(smp_processor_id(), &amu_cpus);
1540 		init_cpu_freq_invariance_counters();
1541 	}
1542 }
1543 
1544 static bool has_amu(const struct arm64_cpu_capabilities *cap,
1545 		    int __unused)
1546 {
1547 	/*
1548 	 * The AMU extension is a non-conflicting feature: the kernel can
1549 	 * safely run a mix of CPUs with and without support for the
1550 	 * activity monitors extension. Therefore, unconditionally enable
1551 	 * the capability to allow any late CPU to use the feature.
1552 	 *
1553 	 * With this feature unconditionally enabled, the cpu_enable
1554 	 * function will be called for all CPUs that match the criteria,
1555 	 * including secondary and hotplugged, marking this feature as
1556 	 * present on that respective CPU. The enable function will also
1557 	 * print a detection message.
1558 	 */
1559 
1560 	return true;
1561 }
1562 #endif
1563 
1564 #ifdef CONFIG_ARM64_VHE
1565 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
1566 {
1567 	return is_kernel_in_hyp_mode();
1568 }
1569 
1570 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
1571 {
1572 	/*
1573 	 * Copy register values that aren't redirected by hardware.
1574 	 *
1575 	 * Before code patching, we only set tpidr_el1, all CPUs need to copy
1576 	 * this value to tpidr_el2 before we patch the code. Once we've done
1577 	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
1578 	 * do anything here.
1579 	 */
1580 	if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
1581 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
1582 }
1583 #endif
1584 
1585 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
1586 {
1587 	u64 val = read_sysreg_s(SYS_CLIDR_EL1);
1588 
1589 	/* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
1590 	WARN_ON(val & (7 << 27 | 7 << 21));
1591 }
1592 
1593 #ifdef CONFIG_ARM64_PAN
1594 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
1595 {
1596 	/*
1597 	 * We modify PSTATE. This won't work from irq context as the PSTATE
1598 	 * is discarded once we return from the exception.
1599 	 */
1600 	WARN_ON_ONCE(in_interrupt());
1601 
1602 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
1603 	asm(SET_PSTATE_PAN(1));
1604 }
1605 #endif /* CONFIG_ARM64_PAN */
1606 
1607 #ifdef CONFIG_ARM64_RAS_EXTN
1608 static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
1609 {
1610 	/* Firmware may have left a deferred SError in this register. */
1611 	write_sysreg_s(0, SYS_DISR_EL1);
1612 }
1613 #endif /* CONFIG_ARM64_RAS_EXTN */
1614 
1615 #ifdef CONFIG_ARM64_PTR_AUTH
1616 static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
1617 {
1618 	int boot_val, sec_val;
1619 
1620 	/* We don't expect to be called with SCOPE_SYSTEM */
1621 	WARN_ON(scope == SCOPE_SYSTEM);
1622 	/*
1623 	 * The ptr-auth feature levels are not compatible with lower
1624 	 * levels. Hence we must match the ptr-auth feature level of
1625 	 * secondary CPUs with that of the boot CPU. The boot CPU's level is
1626 	 * fetched from the sanitised register, whereas a direct register
1627 	 * read is done for the secondary CPUs.
1628 	 * The sanitised feature state is guaranteed to match that of the
1629 	 * boot CPU, as a mismatched secondary CPU is parked before it gets
1630 	 * a chance to update the state with its capability.
1631 	 */
1632 	boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
1633 					       entry->field_pos, entry->sign);
1634 	if (scope & SCOPE_BOOT_CPU)
1635 		return boot_val >= entry->min_field_value;
1636 	/* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */
1637 	sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
1638 					      entry->field_pos, entry->sign);
1639 	return sec_val == boot_val;
1640 }
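
/*
 * Example: if the boot CPU sanitised APA to 1 and a late CPU reports
 * APA = 2, the equality check above fails and the late CPU is parked
 * rather than silently running at a different ptr-auth feature level.
 */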
1641 
1642 static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
1643 				     int scope)
1644 {
1645 	return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
1646 	       has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
1647 }
1648 
1649 static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
1650 			     int __unused)
1651 {
1652 	return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
1653 	       __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
1654 }
1655 #endif /* CONFIG_ARM64_PTR_AUTH */
1656 
1657 #ifdef CONFIG_ARM64_E0PD
1658 static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
1659 {
1660 	if (this_cpu_has_cap(ARM64_HAS_E0PD))
1661 		sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
1662 }
1663 #endif /* CONFIG_ARM64_E0PD */
1664 
1665 #ifdef CONFIG_ARM64_PSEUDO_NMI
1666 static bool enable_pseudo_nmi;
1667 
1668 static int __init early_enable_pseudo_nmi(char *p)
1669 {
1670 	return strtobool(p, &enable_pseudo_nmi);
1671 }
1672 early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
1673 
1674 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
1675 				   int scope)
1676 {
1677 	return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
1678 }
1679 #endif
1680 
1681 #ifdef CONFIG_ARM64_BTI
1682 static void bti_enable(const struct arm64_cpu_capabilities *__unused)
1683 {
1684 	/*
1685 	 * Use of X16/X17 for tail-calls and trampolines that jump to
1686 	 * function entry points using BR is a requirement for
1687 	 * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
1688 	 * So, be strict and forbid other BRs using other registers to
1689 	 * jump onto a PACIxSP instruction:
1690 	 */
1691 	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
1692 	isb();
1693 }
1694 #endif /* CONFIG_ARM64_BTI */
1695 
1696 #ifdef CONFIG_ARM64_MTE
1697 static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
1698 {
1699 	static bool cleared_zero_page = false;
1700 
1701 	/*
1702 	 * Clear the tags in the zero page. This needs to be done via the
1703 	 * linear map which has the Tagged attribute.
1704 	 */
1705 	if (!cleared_zero_page) {
1706 		cleared_zero_page = true;
1707 		mte_clear_page_tags(lm_alias(empty_zero_page));
1708 	}
1709 }
1710 #endif /* CONFIG_ARM64_MTE */
1711 
1712 /* Internal helper functions to match cpu capability type */
1713 static bool
1714 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
1715 {
1716 	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
1717 }
1718 
1719 static bool
1720 cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
1721 {
1722 	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
1723 }
1724 
1725 static bool
1726 cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
1727 {
1728 	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
1729 }
1730 
1731 static const struct arm64_cpu_capabilities arm64_features[] = {
1732 	{
1733 		.desc = "GIC system register CPU interface",
1734 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1735 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1736 		.matches = has_useable_gicv3_cpuif,
1737 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1738 		.field_pos = ID_AA64PFR0_GIC_SHIFT,
1739 		.sign = FTR_UNSIGNED,
1740 		.min_field_value = 1,
1741 	},
1742 #ifdef CONFIG_ARM64_PAN
1743 	{
1744 		.desc = "Privileged Access Never",
1745 		.capability = ARM64_HAS_PAN,
1746 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1747 		.matches = has_cpuid_feature,
1748 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
1749 		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
1750 		.sign = FTR_UNSIGNED,
1751 		.min_field_value = 1,
1752 		.cpu_enable = cpu_enable_pan,
1753 	},
1754 #endif /* CONFIG_ARM64_PAN */
1755 #ifdef CONFIG_ARM64_LSE_ATOMICS
1756 	{
1757 		.desc = "LSE atomic instructions",
1758 		.capability = ARM64_HAS_LSE_ATOMICS,
1759 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1760 		.matches = has_cpuid_feature,
1761 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
1762 		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
1763 		.sign = FTR_UNSIGNED,
1764 		.min_field_value = 2,
1765 	},
1766 #endif /* CONFIG_ARM64_LSE_ATOMICS */
1767 	{
1768 		.desc = "Software prefetching using PRFM",
1769 		.capability = ARM64_HAS_NO_HW_PREFETCH,
1770 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1771 		.matches = has_no_hw_prefetch,
1772 	},
1773 #ifdef CONFIG_ARM64_UAO
1774 	{
1775 		.desc = "User Access Override",
1776 		.capability = ARM64_HAS_UAO,
1777 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1778 		.matches = has_cpuid_feature,
1779 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
1780 		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
1781 		.min_field_value = 1,
1782 		/*
1783 		 * We rely on stop_machine() calling uao_thread_switch() to set
1784 		 * UAO immediately after patching.
1785 		 */
1786 	},
1787 #endif /* CONFIG_ARM64_UAO */
1788 #ifdef CONFIG_ARM64_PAN
1789 	{
1790 		.capability = ARM64_ALT_PAN_NOT_UAO,
1791 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1792 		.matches = cpufeature_pan_not_uao,
1793 	},
1794 #endif /* CONFIG_ARM64_PAN */
1795 #ifdef CONFIG_ARM64_VHE
1796 	{
1797 		.desc = "Virtualization Host Extensions",
1798 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
1799 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1800 		.matches = runs_at_el2,
1801 		.cpu_enable = cpu_copy_el2regs,
1802 	},
1803 #endif	/* CONFIG_ARM64_VHE */
1804 	{
1805 		.desc = "32-bit EL0 Support",
1806 		.capability = ARM64_HAS_32BIT_EL0,
1807 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1808 		.matches = has_cpuid_feature,
1809 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1810 		.sign = FTR_UNSIGNED,
1811 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
1812 		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
1813 	},
1814 #ifdef CONFIG_KVM
1815 	{
1816 		.desc = "32-bit EL1 Support",
1817 		.capability = ARM64_HAS_32BIT_EL1,
1818 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1819 		.matches = has_cpuid_feature,
1820 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1821 		.sign = FTR_UNSIGNED,
1822 		.field_pos = ID_AA64PFR0_EL1_SHIFT,
1823 		.min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
1824 	},
1825 #endif
1826 	{
1827 		.desc = "Kernel page table isolation (KPTI)",
1828 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
1829 		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1830 		/*
1831 		 * The ID feature fields below are used to indicate that
1832 		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
1833 		 * more details.
1834 		 */
1835 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1836 		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
1837 		.min_field_value = 1,
1838 		.matches = unmap_kernel_at_el0,
1839 		.cpu_enable = kpti_install_ng_mappings,
1840 	},
1841 	{
1842 		/* FP/SIMD is not implemented */
1843 		.capability = ARM64_HAS_NO_FPSIMD,
1844 		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1845 		.min_field_value = 0,
1846 		.matches = has_no_fpsimd,
1847 	},
1848 #ifdef CONFIG_ARM64_PMEM
1849 	{
1850 		.desc = "Data cache clean to Point of Persistence",
1851 		.capability = ARM64_HAS_DCPOP,
1852 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1853 		.matches = has_cpuid_feature,
1854 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
1855 		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
1856 		.min_field_value = 1,
1857 	},
1858 	{
1859 		.desc = "Data cache clean to Point of Deep Persistence",
1860 		.capability = ARM64_HAS_DCPODP,
1861 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1862 		.matches = has_cpuid_feature,
1863 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
1864 		.sign = FTR_UNSIGNED,
1865 		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
1866 		.min_field_value = 2,
1867 	},
1868 #endif
1869 #ifdef CONFIG_ARM64_SVE
1870 	{
1871 		.desc = "Scalable Vector Extension",
1872 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1873 		.capability = ARM64_SVE,
1874 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1875 		.sign = FTR_UNSIGNED,
1876 		.field_pos = ID_AA64PFR0_SVE_SHIFT,
1877 		.min_field_value = ID_AA64PFR0_SVE,
1878 		.matches = has_cpuid_feature,
1879 		.cpu_enable = sve_kernel_enable,
1880 	},
1881 #endif /* CONFIG_ARM64_SVE */
1882 #ifdef CONFIG_ARM64_RAS_EXTN
1883 	{
1884 		.desc = "RAS Extension Support",
1885 		.capability = ARM64_HAS_RAS_EXTN,
1886 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1887 		.matches = has_cpuid_feature,
1888 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1889 		.sign = FTR_UNSIGNED,
1890 		.field_pos = ID_AA64PFR0_RAS_SHIFT,
1891 		.min_field_value = ID_AA64PFR0_RAS_V1,
1892 		.cpu_enable = cpu_clear_disr,
1893 	},
1894 #endif /* CONFIG_ARM64_RAS_EXTN */
1895 #ifdef CONFIG_ARM64_AMU_EXTN
1896 	{
1897 		/*
1898 		 * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
1899 		 * Therefore, don't provide .desc as we don't want the detection
1900 		 * message to be shown until at least one CPU is detected to
1901 		 * support the feature.
1902 		 */
1903 		.capability = ARM64_HAS_AMU_EXTN,
1904 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1905 		.matches = has_amu,
1906 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1907 		.sign = FTR_UNSIGNED,
1908 		.field_pos = ID_AA64PFR0_AMU_SHIFT,
1909 		.min_field_value = ID_AA64PFR0_AMU,
1910 		.cpu_enable = cpu_amu_enable,
1911 	},
1912 #endif /* CONFIG_ARM64_AMU_EXTN */
1913 	{
1914 		.desc = "Data cache clean to the PoU not required for I/D coherence",
1915 		.capability = ARM64_HAS_CACHE_IDC,
1916 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1917 		.matches = has_cache_idc,
1918 		.cpu_enable = cpu_emulate_effective_ctr,
1919 	},
1920 	{
1921 		.desc = "Instruction cache invalidation not required for I/D coherence",
1922 		.capability = ARM64_HAS_CACHE_DIC,
1923 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1924 		.matches = has_cache_dic,
1925 	},
1926 	{
1927 		.desc = "Stage-2 Force Write-Back",
1928 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1929 		.capability = ARM64_HAS_STAGE2_FWB,
1930 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
1931 		.sign = FTR_UNSIGNED,
1932 		.field_pos = ID_AA64MMFR2_FWB_SHIFT,
1933 		.min_field_value = 1,
1934 		.matches = has_cpuid_feature,
1935 		.cpu_enable = cpu_has_fwb,
1936 	},
1937 	{
1938 		.desc = "ARMv8.4 Translation Table Level",
1939 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1940 		.capability = ARM64_HAS_ARMv8_4_TTL,
1941 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
1942 		.sign = FTR_UNSIGNED,
1943 		.field_pos = ID_AA64MMFR2_TTL_SHIFT,
1944 		.min_field_value = 1,
1945 		.matches = has_cpuid_feature,
1946 	},
1947 	{
1948 		.desc = "TLB range maintenance instructions",
1949 		.capability = ARM64_HAS_TLB_RANGE,
1950 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1951 		.matches = has_cpuid_feature,
1952 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
1953 		.field_pos = ID_AA64ISAR0_TLB_SHIFT,
1954 		.sign = FTR_UNSIGNED,
1955 		.min_field_value = ID_AA64ISAR0_TLB_RANGE,
1956 	},
1957 #ifdef CONFIG_ARM64_HW_AFDBM
1958 	{
1959 		/*
1960 		 * Since we turn this on always, we don't want the user to
1961 		 * think that the feature is available when it may not be.
1962 		 * So hide the description.
1963 		 *
1964 		 * .desc = "Hardware pagetable Dirty Bit Management",
1965 		 *
1966 		 */
1967 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1968 		.capability = ARM64_HW_DBM,
1969 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
1970 		.sign = FTR_UNSIGNED,
1971 		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
1972 		.min_field_value = 2,
1973 		.matches = has_hw_dbm,
1974 		.cpu_enable = cpu_enable_hw_dbm,
1975 	},
1976 #endif
1977 	{
1978 		.desc = "CRC32 instructions",
1979 		.capability = ARM64_HAS_CRC32,
1980 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1981 		.matches = has_cpuid_feature,
1982 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
1983 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
1984 		.min_field_value = 1,
1985 	},
1986 	{
1987 		.desc = "Speculative Store Bypassing Safe (SSBS)",
1988 		.capability = ARM64_SSBS,
1989 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1990 		.matches = has_cpuid_feature,
1991 		.sys_reg = SYS_ID_AA64PFR1_EL1,
1992 		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
1993 		.sign = FTR_UNSIGNED,
1994 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
1995 	},
1996 #ifdef CONFIG_ARM64_CNP
1997 	{
1998 		.desc = "Common not Private translations",
1999 		.capability = ARM64_HAS_CNP,
2000 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2001 		.matches = has_useable_cnp,
2002 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
2003 		.sign = FTR_UNSIGNED,
2004 		.field_pos = ID_AA64MMFR2_CNP_SHIFT,
2005 		.min_field_value = 1,
2006 		.cpu_enable = cpu_enable_cnp,
2007 	},
2008 #endif
2009 	{
2010 		.desc = "Speculation barrier (SB)",
2011 		.capability = ARM64_HAS_SB,
2012 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2013 		.matches = has_cpuid_feature,
2014 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2015 		.field_pos = ID_AA64ISAR1_SB_SHIFT,
2016 		.sign = FTR_UNSIGNED,
2017 		.min_field_value = 1,
2018 	},
2019 #ifdef CONFIG_ARM64_PTR_AUTH
2020 	{
2021 		.desc = "Address authentication (architected algorithm)",
2022 		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
2023 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2024 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2025 		.sign = FTR_UNSIGNED,
2026 		.field_pos = ID_AA64ISAR1_APA_SHIFT,
2027 		.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
2028 		.matches = has_address_auth_cpucap,
2029 	},
2030 	{
2031 		.desc = "Address authentication (IMP DEF algorithm)",
2032 		.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
2033 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2034 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2035 		.sign = FTR_UNSIGNED,
2036 		.field_pos = ID_AA64ISAR1_API_SHIFT,
2037 		.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
2038 		.matches = has_address_auth_cpucap,
2039 	},
2040 	{
2041 		.capability = ARM64_HAS_ADDRESS_AUTH,
2042 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2043 		.matches = has_address_auth_metacap,
2044 	},
2045 	{
2046 		.desc = "Generic authentication (architected algorithm)",
2047 		.capability = ARM64_HAS_GENERIC_AUTH_ARCH,
2048 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2049 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2050 		.sign = FTR_UNSIGNED,
2051 		.field_pos = ID_AA64ISAR1_GPA_SHIFT,
2052 		.min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
2053 		.matches = has_cpuid_feature,
2054 	},
2055 	{
2056 		.desc = "Generic authentication (IMP DEF algorithm)",
2057 		.capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
2058 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2059 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2060 		.sign = FTR_UNSIGNED,
2061 		.field_pos = ID_AA64ISAR1_GPI_SHIFT,
2062 		.min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
2063 		.matches = has_cpuid_feature,
2064 	},
2065 	{
2066 		.capability = ARM64_HAS_GENERIC_AUTH,
2067 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2068 		.matches = has_generic_auth,
2069 	},
2070 #endif /* CONFIG_ARM64_PTR_AUTH */
2071 #ifdef CONFIG_ARM64_PSEUDO_NMI
2072 	{
2073 		/*
2074 		 * Depends on having GICv3
2075 		 */
2076 		.desc = "IRQ priority masking",
2077 		.capability = ARM64_HAS_IRQ_PRIO_MASKING,
2078 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2079 		.matches = can_use_gic_priorities,
2080 		.sys_reg = SYS_ID_AA64PFR0_EL1,
2081 		.field_pos = ID_AA64PFR0_GIC_SHIFT,
2082 		.sign = FTR_UNSIGNED,
2083 		.min_field_value = 1,
2084 	},
2085 #endif
2086 #ifdef CONFIG_ARM64_E0PD
2087 	{
2088 		.desc = "E0PD",
2089 		.capability = ARM64_HAS_E0PD,
2090 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2091 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
2092 		.sign = FTR_UNSIGNED,
2093 		.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
2094 		.matches = has_cpuid_feature,
2095 		.min_field_value = 1,
2096 		.cpu_enable = cpu_enable_e0pd,
2097 	},
2098 #endif
2099 #ifdef CONFIG_ARCH_RANDOM
2100 	{
2101 		.desc = "Random Number Generator",
2102 		.capability = ARM64_HAS_RNG,
2103 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2104 		.matches = has_cpuid_feature,
2105 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
2106 		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
2107 		.sign = FTR_UNSIGNED,
2108 		.min_field_value = 1,
2109 	},
2110 #endif
2111 #ifdef CONFIG_ARM64_BTI
2112 	{
2113 		.desc = "Branch Target Identification",
2114 		.capability = ARM64_BTI,
2115 #ifdef CONFIG_ARM64_BTI_KERNEL
2116 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2117 #else
2118 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2119 #endif
2120 		.matches = has_cpuid_feature,
2121 		.cpu_enable = bti_enable,
2122 		.sys_reg = SYS_ID_AA64PFR1_EL1,
2123 		.field_pos = ID_AA64PFR1_BT_SHIFT,
2124 		.min_field_value = ID_AA64PFR1_BT_BTI,
2125 		.sign = FTR_UNSIGNED,
2126 	},
2127 #endif
2128 #ifdef CONFIG_ARM64_MTE
2129 	{
2130 		.desc = "Memory Tagging Extension",
2131 		.capability = ARM64_MTE,
2132 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2133 		.matches = has_cpuid_feature,
2134 		.sys_reg = SYS_ID_AA64PFR1_EL1,
2135 		.field_pos = ID_AA64PFR1_MTE_SHIFT,
2136 		.min_field_value = ID_AA64PFR1_MTE,
2137 		.sign = FTR_UNSIGNED,
2138 		.cpu_enable = cpu_enable_mte,
2139 	},
2140 #endif /* CONFIG_ARM64_MTE */
2141 	{},
2142 };
2143 
2144 #define HWCAP_CPUID_MATCH(reg, field, s, min_value)				\
2145 		.matches = has_cpuid_feature,					\
2146 		.sys_reg = reg,							\
2147 		.field_pos = field,						\
2148 		.sign = s,							\
2149 		.min_field_value = min_value,
2150 
2151 #define __HWCAP_CAP(name, cap_type, cap)					\
2152 		.desc = name,							\
2153 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,				\
2154 		.hwcap_type = cap_type,						\
2155 		.hwcap = cap,							\
2156 
2157 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)			\
2158 	{									\
2159 		__HWCAP_CAP(#cap, cap_type, cap)				\
2160 		HWCAP_CPUID_MATCH(reg, field, s, min_value)			\
2161 	}
2162 
2163 #define HWCAP_MULTI_CAP(list, cap_type, cap)					\
2164 	{									\
2165 		__HWCAP_CAP(#cap, cap_type, cap)				\
2166 		.matches = cpucap_multi_entry_cap_matches,			\
2167 		.match_list = list,						\
2168 	}
2169 
2170 #define HWCAP_CAP_MATCH(match, cap_type, cap)					\
2171 	{									\
2172 		__HWCAP_CAP(#cap, cap_type, cap)				\
2173 		.matches = match,						\
2174 	}
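
/*
 * For reference, an entry such as
 * HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED,
 * 2, CAP_HWCAP, KERNEL_HWCAP_PMULL) expands to approximately:
 *
 *	{
 *		.desc = "KERNEL_HWCAP_PMULL",
 *		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.hwcap_type = CAP_HWCAP,
 *		.hwcap = KERNEL_HWCAP_PMULL,
 *		.matches = has_cpuid_feature,
 *		.sys_reg = SYS_ID_AA64ISAR0_EL1,
 *		.field_pos = ID_AA64ISAR0_AES_SHIFT,
 *		.sign = FTR_UNSIGNED,
 *		.min_field_value = 2,
 *	}
 */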
2175 
2176 #ifdef CONFIG_ARM64_PTR_AUTH
2177 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
2178 	{
2179 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
2180 				  FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
2181 	},
2182 	{
2183 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
2184 				  FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
2185 	},
2186 	{},
2187 };
2188 
2189 static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
2190 	{
2191 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
2192 				  FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
2193 	},
2194 	{
2195 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
2196 				  FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
2197 	},
2198 	{},
2199 };
2200 #endif
2201 
2202 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
2203 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
2204 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
2205 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
2206 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
2207 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
2208 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
2209 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
2210 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
2211 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
2212 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
2213 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
2214 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
2215 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
2216 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
2217 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
2218 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
2219 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
2220 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
2221 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
2222 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
2223 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
2224 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
2225 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
2226 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
2227 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
2228 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
2229 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
2230 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
2231 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
2232 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
2233 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
2234 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
2235 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
2236 #ifdef CONFIG_ARM64_SVE
2237 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
2238 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
2239 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
2240 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
2241 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
2242 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
2243 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
2244 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
2245 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
2246 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
2247 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
2248 #endif
2249 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
2250 #ifdef CONFIG_ARM64_BTI
2251 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
2252 #endif
2253 #ifdef CONFIG_ARM64_PTR_AUTH
2254 	HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
2255 	HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
2256 #endif
2257 #ifdef CONFIG_ARM64_MTE
2258 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
2259 #endif /* CONFIG_ARM64_MTE */
2260 	{},
2261 };
2262 
2263 #ifdef CONFIG_COMPAT
2264 static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
2265 {
2266 	/*
2267 	 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
2268 	 * in line with the arm32 check in vfp_init(). We make the check
2269 	 * future-proof by requiring each value to be non-zero.
2270 	 */
2271 	u32 mvfr1;
2272 
2273 	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
2274 	if (scope == SCOPE_SYSTEM)
2275 		mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
2276 	else
2277 		mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
2278 
2279 	return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
2280 		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
2281 		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
2282 }
2283 #endif
2284 
2285 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
2286 #ifdef CONFIG_COMPAT
2287 	HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
2288 	HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
2289 	/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
2290 	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
2291 	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
2292 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
2293 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
2294 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
2295 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
2296 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
2297 #endif
2298 	{},
2299 };
2300 
2301 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
2302 {
2303 	switch (cap->hwcap_type) {
2304 	case CAP_HWCAP:
2305 		cpu_set_feature(cap->hwcap);
2306 		break;
2307 #ifdef CONFIG_COMPAT
2308 	case CAP_COMPAT_HWCAP:
2309 		compat_elf_hwcap |= (u32)cap->hwcap;
2310 		break;
2311 	case CAP_COMPAT_HWCAP2:
2312 		compat_elf_hwcap2 |= (u32)cap->hwcap;
2313 		break;
2314 #endif
2315 	default:
2316 		WARN_ON(1);
2317 		break;
2318 	}
2319 }
2320 
2321 /* Check if we have a particular HWCAP enabled */
2322 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
2323 {
2324 	bool rc;
2325 
2326 	switch (cap->hwcap_type) {
2327 	case CAP_HWCAP:
2328 		rc = cpu_have_feature(cap->hwcap);
2329 		break;
2330 #ifdef CONFIG_COMPAT
2331 	case CAP_COMPAT_HWCAP:
2332 		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
2333 		break;
2334 	case CAP_COMPAT_HWCAP2:
2335 		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
2336 		break;
2337 #endif
2338 	default:
2339 		WARN_ON(1);
2340 		rc = false;
2341 	}
2342 
2343 	return rc;
2344 }
2345 
2346 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
2347 {
2348 	/* We support emulation of accesses to CPU ID feature registers */
2349 	cpu_set_named_feature(CPUID);
2350 	for (; hwcaps->matches; hwcaps++)
2351 		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
2352 			cap_set_elf_hwcap(hwcaps);
2353 }
2354 
2355 static void update_cpu_capabilities(u16 scope_mask)
2356 {
2357 	int i;
2358 	const struct arm64_cpu_capabilities *caps;
2359 
2360 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2361 	for (i = 0; i < ARM64_NCAPS; i++) {
2362 		caps = cpu_hwcaps_ptrs[i];
2363 		if (!caps || !(caps->type & scope_mask) ||
2364 		    cpus_have_cap(caps->capability) ||
2365 		    !caps->matches(caps, cpucap_default_scope(caps)))
2366 			continue;
2367 
2368 		if (caps->desc)
2369 			pr_info("detected: %s\n", caps->desc);
2370 		cpus_set_cap(caps->capability);
2371 
2372 		if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
2373 			set_bit(caps->capability, boot_capabilities);
2374 	}
2375 }
2376 
2377 /*
2378  * Enable all the available capabilities on this CPU. The capabilities
2379  * with BOOT_CPU scope are handled separately and hence skipped here.
2380  */
2381 static int cpu_enable_non_boot_scope_capabilities(void *__unused)
2382 {
2383 	int i;
2384 	u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
2385 
2386 	for_each_available_cap(i) {
2387 		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
2388 
2389 		if (WARN_ON(!cap))
2390 			continue;
2391 
2392 		if (!(cap->type & non_boot_scope))
2393 			continue;
2394 
2395 		if (cap->cpu_enable)
2396 			cap->cpu_enable(cap);
2397 	}
2398 	return 0;
2399 }
2400 
2401 /*
2402  * Run through the enabled capabilities and call cpu_enable() for each
2403  * of them on all active CPUs.
2404  */
2405 static void __init enable_cpu_capabilities(u16 scope_mask)
2406 {
2407 	int i;
2408 	const struct arm64_cpu_capabilities *caps;
2409 	bool boot_scope;
2410 
2411 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2412 	boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
2413 
2414 	for (i = 0; i < ARM64_NCAPS; i++) {
2415 		unsigned int num;
2416 
2417 		caps = cpu_hwcaps_ptrs[i];
2418 		if (!caps || !(caps->type & scope_mask))
2419 			continue;
2420 		num = caps->capability;
2421 		if (!cpus_have_cap(num))
2422 			continue;
2423 
2424 		/* Ensure cpus_have_const_cap(num) works */
2425 		static_branch_enable(&cpu_hwcap_keys[num]);
2426 
2427 		if (boot_scope && caps->cpu_enable)
2428 			/*
2429 			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
2430 			 * before any secondary CPU boots. Thus, each secondary
2431 			 * will enable the capability as appropriate via
2432 			 * check_local_cpu_capabilities(). The only exception is
2433 			 * the boot CPU, for which the capability must be
2434 			 * enabled here. This approach avoids costly
2435 			 * stop_machine() calls for this case.
2436 			 */
2437 			caps->cpu_enable(caps);
2438 	}
2439 
2440 	/*
2441 	 * For all non-boot scope capabilities, use stop_machine()
2442 	 * as it schedules the work allowing us to modify PSTATE,
2443 	 * instead of on_each_cpu() which uses an IPI, giving us a
2444 	 * PSTATE that disappears when we return.
2445 	 */
2446 	if (!boot_scope)
2447 		stop_machine(cpu_enable_non_boot_scope_capabilities,
2448 			     NULL, cpu_online_mask);
2449 }
2450 
2451 /*
2452  * Run through the list of capabilities to check for conflicts.
2453  * If the system has already detected a capability, take necessary
2454  * action on this CPU.
2455  */
2456 static void verify_local_cpu_caps(u16 scope_mask)
2457 {
2458 	int i;
2459 	bool cpu_has_cap, system_has_cap;
2460 	const struct arm64_cpu_capabilities *caps;
2461 
2462 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2463 
2464 	for (i = 0; i < ARM64_NCAPS; i++) {
2465 		caps = cpu_hwcaps_ptrs[i];
2466 		if (!caps || !(caps->type & scope_mask))
2467 			continue;
2468 
2469 		cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
2470 		system_has_cap = cpus_have_cap(caps->capability);
2471 
2472 		if (system_has_cap) {
2473 			/*
2474 			 * Check if the new CPU misses an advertised feature,
2475 			 * which is not safe to miss.
2476 			 */
2477 			if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
2478 				break;
2479 			/*
2480 			 * We have to issue cpu_enable() irrespective of
2481 			 * whether the CPU has it or not, as it is enabled
2482 			 * system wide. It is up to the callback to take
2483 			 * appropriate action on this CPU.
2484 			 */
2485 			if (caps->cpu_enable)
2486 				caps->cpu_enable(caps);
2487 		} else {
2488 			/*
2489 			 * Check if the CPU has this capability if it isn't
2490 			 * safe to have when the system doesn't.
2491 			 */
2492 			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
2493 				break;
2494 		}
2495 	}
2496 
2497 	if (i < ARM64_NCAPS) {
2498 		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
2499 			smp_processor_id(), caps->capability,
2500 			caps->desc, system_has_cap, cpu_has_cap);
2501 
2502 		if (cpucap_panic_on_conflict(caps))
2503 			cpu_panic_kernel();
2504 		else
2505 			cpu_die_early();
2506 	}
2507 }
2508 
2509 /*
2510  * Check for CPU features that are used in early boot
2511  * based on the Boot CPU value.
2512  */
2513 static void check_early_cpu_features(void)
2514 {
2515 	verify_cpu_asid_bits();
2516 
2517 	verify_local_cpu_caps(SCOPE_BOOT_CPU);
2518 }
2519 
2520 static void
2521 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
2522 {
2523 
2524 	for (; caps->matches; caps++)
2525 		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
2526 			pr_crit("CPU%d: missing HWCAP: %s\n",
2527 					smp_processor_id(), caps->desc);
2528 			cpu_die_early();
2529 		}
2530 }
2531 
2532 static void verify_sve_features(void)
2533 {
2534 	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
2535 	u64 zcr = read_zcr_features();
2536 
2537 	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
2538 	unsigned int len = zcr & ZCR_ELx_LEN_MASK;
2539 
2540 	if (len < safe_len || sve_verify_vq_map()) {
2541 		pr_crit("CPU%d: SVE: vector length support mismatch\n",
2542 			smp_processor_id());
2543 		cpu_die_early();
2544 	}
2545 
2546 	/* Add checks on other ZCR bits here if necessary */
2547 }
2548 
2549 static void verify_hyp_capabilities(void)
2550 {
2551 	u64 safe_mmfr1, mmfr0, mmfr1;
2552 	int parange, ipa_max;
2553 	unsigned int safe_vmid_bits, vmid_bits;
2554 
2555 	if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
2556 		return;
2557 
2558 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2559 	mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
2560 	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
2561 
2562 	/* Verify VMID bits */
2563 	safe_vmid_bits = get_vmid_bits(safe_mmfr1);
2564 	vmid_bits = get_vmid_bits(mmfr1);
2565 	if (vmid_bits < safe_vmid_bits) {
2566 		pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
2567 		cpu_die_early();
2568 	}
2569 
2570 	/* Verify IPA range */
2571 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
2572 				ID_AA64MMFR0_PARANGE_SHIFT);
2573 	ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
2574 	if (ipa_max < get_kvm_ipa_limit()) {
2575 		pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
2576 		cpu_die_early();
2577 	}
2578 }
2579 
2580 /*
2581  * Run through the enabled system capabilities and enable each on this CPU.
2582  * The capabilities were decided based on the available CPUs at the boot time.
2583  * Any new CPU should match the system wide status of the capability. If the
2584  * new CPU doesn't have a capability which the system now has enabled, we
2585  * cannot do anything to fix it up and could cause unexpected failures. So
2586  * we park the CPU.
2587  */
2588 static void verify_local_cpu_capabilities(void)
2589 {
2590 	/*
2591 	 * The capabilities with SCOPE_BOOT_CPU are checked from
2592 	 * check_early_cpu_features(), as they need to be verified
2593 	 * on all secondary CPUs.
2594 	 */
2595 	verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
2596 
2597 	verify_local_elf_hwcaps(arm64_elf_hwcaps);
2598 
2599 	if (system_supports_32bit_el0())
2600 		verify_local_elf_hwcaps(compat_elf_hwcaps);
2601 
2602 	if (system_supports_sve())
2603 		verify_sve_features();
2604 
2605 	if (is_hyp_mode_available())
2606 		verify_hyp_capabilities();
2607 }
2608 
2609 void check_local_cpu_capabilities(void)
2610 {
2611 	/*
2612 	 * All secondary CPUs should conform to the early CPU features
2613 	 * in use by the kernel based on boot CPU.
2614 	 */
2615 	check_early_cpu_features();
2616 
2617 	/*
2618 	 * If we haven't finalised the system capabilities, this CPU gets
2619 	 * a chance to update the errata workarounds and local features.
2620 	 * Otherwise, this CPU should verify that it has all the system
2621 	 * advertised capabilities.
2622 	 */
2623 	if (!system_capabilities_finalized())
2624 		update_cpu_capabilities(SCOPE_LOCAL_CPU);
2625 	else
2626 		verify_local_cpu_capabilities();
2627 }
2628 
2629 static void __init setup_boot_cpu_capabilities(void)
2630 {
2631 	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
2632 	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
2633 	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
2634 	enable_cpu_capabilities(SCOPE_BOOT_CPU);
2635 }
2636 
2637 bool this_cpu_has_cap(unsigned int n)
2638 {
2639 	if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
2640 		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2641 
2642 		if (cap)
2643 			return cap->matches(cap, SCOPE_LOCAL_CPU);
2644 	}
2645 
2646 	return false;
2647 }
2648 
2649 /*
2650  * This helper function is used in a narrow window when:
2651  * - the system-wide safe registers have been set up for all SMP CPUs, and
2652  * - the SYSTEM_FEATURE cpu_hwcaps may not yet have been set.
2653  * In all other cases cpus_have_{const_}cap() should be used.
2654  */
2655 static bool __system_matches_cap(unsigned int n)
2656 {
2657 	if (n < ARM64_NCAPS) {
2658 		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2659 
2660 		if (cap)
2661 			return cap->matches(cap, SCOPE_SYSTEM);
2662 	}
2663 	return false;
2664 }
2665 
2666 void cpu_set_feature(unsigned int num)
2667 {
2668 	WARN_ON(num >= MAX_CPU_FEATURES);
2669 	elf_hwcap |= BIT(num);
2670 }
2671 EXPORT_SYMBOL_GPL(cpu_set_feature);
2672 
2673 bool cpu_have_feature(unsigned int num)
2674 {
2675 	WARN_ON(num >= MAX_CPU_FEATURES);
2676 	return elf_hwcap & BIT(num);
2677 }
2678 EXPORT_SYMBOL_GPL(cpu_have_feature);
2679 
2680 unsigned long cpu_get_elf_hwcap(void)
2681 {
2682 	/*
2683 	 * We currently only populate the first 32 bits of AT_HWCAP. Please
2684 	 * note that for userspace compatibility we guarantee that bits 62
2685 	 * and 63 will always be returned as 0.
2686 	 */
2687 	return lower_32_bits(elf_hwcap);
2688 }
2689 
2690 unsigned long cpu_get_elf_hwcap2(void)
2691 {
2692 	return upper_32_bits(elf_hwcap);
2693 }
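
/*
 * Userspace consumes these values through the auxiliary vector; a
 * minimal (illustrative) userspace check looks like:
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	bool have_aes = getauxval(AT_HWCAP) & HWCAP_AES;
 */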
2694 
2695 static void __init setup_system_capabilities(void)
2696 {
2697 	/*
2698 	 * We have finalised the system-wide safe feature
2699 	 * registers, finalise the capabilities that depend
2700 	 * on it. Also enable all the available capabilities,
2701 	 * that are not enabled already.
2702 	 */
2703 	update_cpu_capabilities(SCOPE_SYSTEM);
2704 	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
2705 }
2706 
2707 void __init setup_cpu_features(void)
2708 {
2709 	u32 cwg;
2710 
2711 	setup_system_capabilities();
2712 	setup_elf_hwcaps(arm64_elf_hwcaps);
2713 
2714 	if (system_supports_32bit_el0())
2715 		setup_elf_hwcaps(compat_elf_hwcaps);
2716 
2717 	if (system_uses_ttbr0_pan())
2718 		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
2719 
2720 	sve_setup();
2721 	minsigstksz_setup();
2722 
2723 	/* Advertise that we have computed the system capabilities */
2724 	finalize_system_capabilities();
2725 
2726 	/*
2727 	 * Check for sane CTR_EL0.CWG value.
2728 	 */
2729 	cwg = cache_type_cwg();
2730 	if (!cwg)
2731 		pr_warn("No Cache Writeback Granule information, assuming %d\n",
2732 			ARCH_DMA_MINALIGN);
2733 }
2734 
2735 static bool __maybe_unused
2736 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
2737 {
2738 	return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
2739 }
2740 
2741 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
2742 {
2743 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
2744 }
2745 
2746 /*
2747  * We emulate only the following system register space.
2748  * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
2749  * See Table C5-6 System instruction encodings for System register accesses,
2750  * ARMv8 ARM(ARM DDI 0487A.f) for more details.
2751  */
2752 static inline bool __attribute_const__ is_emulated(u32 id)
2753 {
2754 	return (sys_reg_Op0(id) == 0x3 &&
2755 		sys_reg_CRn(id) == 0x0 &&
2756 		sys_reg_Op1(id) == 0x0 &&
2757 		(sys_reg_CRm(id) == 0 ||
2758 		 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
2759 }
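
/*
 * For instance, MIDR_EL1 (Op0 = 3, Op1 = 0, CRn = 0, CRm = 0, Op2 = 0)
 * and the ID_AA64* registers (CRm = 4..7) pass this check, while any
 * encoding with CRm in the 1..3 range is rejected and emulate_sys_reg()
 * below returns -EINVAL for it.
 */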
2760 
2761 /*
2762  * With CRm == 0, reg should be one of :
2763  * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
2764  */
2765 static inline int emulate_id_reg(u32 id, u64 *valp)
2766 {
2767 	switch (id) {
2768 	case SYS_MIDR_EL1:
2769 		*valp = read_cpuid_id();
2770 		break;
2771 	case SYS_MPIDR_EL1:
2772 		*valp = SYS_MPIDR_SAFE_VAL;
2773 		break;
2774 	case SYS_REVIDR_EL1:
2775 		/* IMPLEMENTATION DEFINED values are emulated with 0 */
2776 		*valp = 0;
2777 		break;
2778 	default:
2779 		return -EINVAL;
2780 	}
2781 
2782 	return 0;
2783 }
2784 
2785 static int emulate_sys_reg(u32 id, u64 *valp)
2786 {
2787 	struct arm64_ftr_reg *regp;
2788 
2789 	if (!is_emulated(id))
2790 		return -EINVAL;
2791 
2792 	if (sys_reg_CRm(id) == 0)
2793 		return emulate_id_reg(id, valp);
2794 
2795 	regp = get_arm64_ftr_reg_nowarn(id);
2796 	if (regp)
2797 		*valp = arm64_ftr_reg_user_value(regp);
2798 	else
2799 		/*
2800 		 * The untracked registers are either IMPLEMENTATION DEFINED
2801 		 * (e.g, ID_AFR0_EL1) or reserved RAZ.
2802 		 */
2803 		*valp = 0;
2804 	return 0;
2805 }
2806 
2807 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
2808 {
2809 	int rc;
2810 	u64 val;
2811 
2812 	rc = emulate_sys_reg(sys_reg, &val);
2813 	if (!rc) {
2814 		pt_regs_write_reg(regs, rt, val);
2815 		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
2816 	}
2817 	return rc;
2818 }
2819 
2820 static int emulate_mrs(struct pt_regs *regs, u32 insn)
2821 {
2822 	u32 sys_reg, rt;
2823 
2824 	/*
2825 	 * sys_reg values are defined as used in mrs/msr instructions;
2826 	 * shift the imm value to get the encoding.
2827 	 */
2828 	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
2829 	rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
2830 	return do_emulate_mrs(regs, sys_reg, rt);
2831 }
2832 
2833 static struct undef_hook mrs_hook = {
2834 	.instr_mask = 0xfff00000,
2835 	.instr_val  = 0xd5300000,
2836 	.pstate_mask = PSR_AA32_MODE_MASK,
2837 	.pstate_val = PSR_MODE_EL0t,
2838 	.fn = emulate_mrs,
2839 };
2840 
2841 static int __init enable_mrs_emulation(void)
2842 {
2843 	register_undef_hook(&mrs_hook);
2844 	return 0;
2845 }
2846 
2847 core_initcall(enable_mrs_emulation);
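
/*
 * With the hook registered, EL0 can read the sanitised ID registers
 * directly; an illustrative userspace snippet (assuming an assembler
 * that accepts named ID registers in MRS):
 *
 *	unsigned long isar0;
 *
 *	asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));
 */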
2848 
2849 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
2850 			  char *buf)
2851 {
2852 	if (__meltdown_safe)
2853 		return sprintf(buf, "Not affected\n");
2854 
2855 	if (arm64_kernel_unmapped_at_el0())
2856 		return sprintf(buf, "Mitigation: PTI\n");
2857 
2858 	return sprintf(buf, "Vulnerable\n");
2859 }
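
/*
 * The result is exported through sysfs, e.g. (illustrative):
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/meltdown
 *	Mitigation: PTI
 */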
2860