1 /*
2  * Copyright (C) 2012,2013 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * Derived from arch/arm/kvm/coproc.c:
6  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7  * Authors: Rusty Russell <rusty@rustcorp.com.au>
8  *          Christoffer Dall <c.dall@virtualopensystems.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License, version 2, as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
21  */
22 
23 #include <linux/bsearch.h>
24 #include <linux/kvm_host.h>
25 #include <linux/mm.h>
26 #include <linux/printk.h>
27 #include <linux/uaccess.h>
28 
29 #include <asm/cacheflush.h>
30 #include <asm/cputype.h>
31 #include <asm/debug-monitors.h>
32 #include <asm/esr.h>
33 #include <asm/kvm_arm.h>
34 #include <asm/kvm_coproc.h>
35 #include <asm/kvm_emulate.h>
36 #include <asm/kvm_host.h>
37 #include <asm/kvm_hyp.h>
38 #include <asm/kvm_mmu.h>
39 #include <asm/perf_event.h>
40 #include <asm/sysreg.h>
41 
42 #include <trace/events/kvm.h>
43 
44 #include "sys_regs.h"
45 
46 #include "trace.h"
47 
48 /*
49  * All of this file is extremely similar to the ARM coproc.c, but the
50  * types are different. My gut feeling is that it should be pretty
51  * easy to merge, but that would be an ABI breakage -- again. VFP
52  * would also need to be abstracted.
53  *
54  * For AArch32, we only take care of what is being trapped. Anything
55  * that has to do with init and userspace access has to go via the
56  * 64bit interface.
57  */
58 
59 static bool read_from_write_only(struct kvm_vcpu *vcpu,
60 				 struct sys_reg_params *params,
61 				 const struct sys_reg_desc *r)
62 {
63 	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
64 	print_sys_reg_instr(params);
65 	kvm_inject_undefined(vcpu);
66 	return false;
67 }
68 
69 static bool write_to_read_only(struct kvm_vcpu *vcpu,
70 			       struct sys_reg_params *params,
71 			       const struct sys_reg_desc *r)
72 {
73 	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
74 	print_sys_reg_instr(params);
75 	kvm_inject_undefined(vcpu);
76 	return false;
77 }
78 
79 u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
80 {
81 	if (!vcpu->arch.sysregs_loaded_on_cpu)
82 		goto immediate_read;
83 
84 	/*
85 	 * System registers listed in the switch are not saved on every
86 	 * exit from the guest but are only saved on vcpu_put.
87 	 *
88 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
89 	 * should never be listed below, because the guest cannot modify its
90 	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
91 	 * thread when emulating cross-VCPU communication.
92 	 */
93 	switch (reg) {
94 	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
95 	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
96 	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
97 	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
98 	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
99 	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
100 	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
101 	case ESR_EL1:		return read_sysreg_s(esr_EL12);
102 	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
103 	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
104 	case FAR_EL1:		return read_sysreg_s(far_EL12);
105 	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
106 	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
107 	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
108 	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
109 	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
110 	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
111 	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
112 	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
113 	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
114 	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
115 	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
116 	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
117 	}
118 
119 immediate_read:
120 	return __vcpu_sys_reg(vcpu, reg);
121 }
122 
123 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
124 {
125 	if (!vcpu->arch.sysregs_loaded_on_cpu)
126 		goto immediate_write;
127 
128 	/*
129 	 * System registers listed in the switch are not restored on every
130 	 * entry to the guest but are only restored on vcpu_load.
131 	 *
132 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
133 	 * should never be listed below, because the MPIDR should only be
134 	 * set once, before running the VCPU, and never changed later.
135 	 */
136 	switch (reg) {
137 	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
138 	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
139 	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
140 	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
141 	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
142 	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
143 	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
144 	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
145 	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
146 	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
147 	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
148 	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
149 	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
150 	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
151 	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
152 	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
153 	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
154 	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
155 	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
156 	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
157 	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
158 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
159 	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
160 	}
161 
162 immediate_write:
163 	 __vcpu_sys_reg(vcpu, reg) = val;
164 }
165 
166 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
167 static u32 cache_levels;
168 
169 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
170 #define CSSELR_MAX 12
171 
172 /* Which cache CCSIDR represents depends on CSSELR value. */
173 static u32 get_ccsidr(u32 csselr)
174 {
175 	u32 ccsidr;
176 
177 	/* Make sure no one else changes CSSELR during this! */
178 	local_irq_disable();
179 	write_sysreg(csselr, csselr_el1);
180 	isb();
181 	ccsidr = read_sysreg(ccsidr_el1);
182 	local_irq_enable();
183 
184 	return ccsidr;
185 }
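
/*
 * Illustrative note (not from the original source): CSSELR encodes the
 * cache to report as (level << 1) | InD, so for example a CSSELR value
 * of 0 selects the L1 data/unified cache, 1 the L1 instruction cache,
 * and 2 the L2 data/unified cache.
 */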
186 
187 /*
188  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
189  */
190 static bool access_dcsw(struct kvm_vcpu *vcpu,
191 			struct sys_reg_params *p,
192 			const struct sys_reg_desc *r)
193 {
194 	if (!p->is_write)
195 		return read_from_write_only(vcpu, p, r);
196 
197 	/*
198 	 * Only track S/W ops if we don't have FWB. It still indicates
199 	 * that the guest is a bit broken (S/W operations should only
200 	 * be done by firmware, knowing that there is only a single
201 	 * CPU left in the system, and certainly not from non-secure
202 	 * software).
203 	 */
204 	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
205 		kvm_set_way_flush(vcpu);
206 
207 	return true;
208 }
209 
210 /*
211  * Generic accessor for VM registers. Only called as long as HCR_TVM
212  * is set. If the guest enables the MMU, we stop trapping the VM
213  * sys_regs and leave it in complete control of the caches.
214  */
215 static bool access_vm_reg(struct kvm_vcpu *vcpu,
216 			  struct sys_reg_params *p,
217 			  const struct sys_reg_desc *r)
218 {
219 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
220 	u64 val;
221 	int reg = r->reg;
222 
223 	BUG_ON(!p->is_write);
224 
225 	/* See the 32bit mapping in kvm_host.h */
226 	if (p->is_aarch32)
227 		reg = r->reg / 2;
228 
229 	if (!p->is_aarch32 || !p->is_32bit) {
230 		val = p->regval;
231 	} else {
232 		val = vcpu_read_sys_reg(vcpu, reg);
233 		if (r->reg % 2)
234 			val = (p->regval << 32) | (u64)lower_32_bits(val);
235 		else
236 			val = ((u64)upper_32_bits(val) << 32) |
237 				lower_32_bits(p->regval);
238 	}
239 	vcpu_write_sys_reg(vcpu, val, reg);
240 
241 	kvm_toggle_cache(vcpu, was_enabled);
242 	return true;
243 }
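
/*
 * Illustrative example (not from the original source): for an AArch32
 * 32-bit access, a write to the "high" half of a banked register (odd
 * r->reg) only replaces bits [63:32] of the 64-bit EL1 view, while a
 * write to the "low" half (even r->reg) only replaces bits [31:0]; the
 * other half is preserved from the value read back above.
 */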
244 
245 /*
246  * Trap handler for the GICv3 SGI generation system register.
247  * Forward the request to the VGIC emulation.
248  * The cp15_64 code makes sure this automatically works
249  * for both AArch64 and AArch32 accesses.
250  */
251 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
252 			   struct sys_reg_params *p,
253 			   const struct sys_reg_desc *r)
254 {
255 	bool g1;
256 
257 	if (!p->is_write)
258 		return read_from_write_only(vcpu, p, r);
259 
260 	/*
261 	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
262 	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
263 	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
264 	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
265 	 * group.
266 	 */
267 	if (p->is_aarch32) {
268 		switch (p->Op1) {
269 		default:		/* Keep GCC quiet */
270 		case 0:			/* ICC_SGI1R */
271 			g1 = true;
272 			break;
273 		case 1:			/* ICC_ASGI1R */
274 		case 2:			/* ICC_SGI0R */
275 			g1 = false;
276 			break;
277 		}
278 	} else {
279 		switch (p->Op2) {
280 		default:		/* Keep GCC quiet */
281 		case 5:			/* ICC_SGI1R_EL1 */
282 			g1 = true;
283 			break;
284 		case 6:			/* ICC_ASGI1R_EL1 */
285 		case 7:			/* ICC_SGI0R_EL1 */
286 			g1 = false;
287 			break;
288 		}
289 	}
290 
291 	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
292 
293 	return true;
294 }
295 
296 static bool access_gic_sre(struct kvm_vcpu *vcpu,
297 			   struct sys_reg_params *p,
298 			   const struct sys_reg_desc *r)
299 {
300 	if (p->is_write)
301 		return ignore_write(vcpu, p);
302 
303 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
304 	return true;
305 }
306 
307 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
308 			struct sys_reg_params *p,
309 			const struct sys_reg_desc *r)
310 {
311 	if (p->is_write)
312 		return ignore_write(vcpu, p);
313 	else
314 		return read_zero(vcpu, p);
315 }
316 
317 static bool trap_undef(struct kvm_vcpu *vcpu,
318 		       struct sys_reg_params *p,
319 		       const struct sys_reg_desc *r)
320 {
321 	kvm_inject_undefined(vcpu);
322 	return false;
323 }
324 
325 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
326 			   struct sys_reg_params *p,
327 			   const struct sys_reg_desc *r)
328 {
329 	if (p->is_write) {
330 		return ignore_write(vcpu, p);
331 	} else {
332 		p->regval = (1 << 3);
333 		return true;
334 	}
335 }
336 
337 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
338 				   struct sys_reg_params *p,
339 				   const struct sys_reg_desc *r)
340 {
341 	if (p->is_write) {
342 		return ignore_write(vcpu, p);
343 	} else {
344 		p->regval = read_sysreg(dbgauthstatus_el1);
345 		return true;
346 	}
347 }
348 
349 /*
350  * We want to avoid world-switching all the DBG registers all the
351  * time:
352  *
353  * - If we've touched any debug register, it is likely that we're
354  *   going to touch more of them. It then makes sense to disable the
355  *   traps and start doing the save/restore dance
356  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
357  *   then mandatory to save/restore the registers, as the guest
358  *   depends on them.
359  *
360  * For this, we use a DIRTY bit, indicating the guest has modified the
361  * debug registers, used as follows:
362  *
363  * On guest entry:
364  * - If the dirty bit is set (because we're coming back from trapping),
365  *   disable the traps, save host registers, restore guest registers.
366  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
367  *   set the dirty bit, disable the traps, save host registers,
368  *   restore guest registers.
369  * - Otherwise, enable the traps
370  *
371  * On guest exit:
372  * - If the dirty bit is set, save guest registers, restore host
373  *   registers and clear the dirty bit. This ensures that the host can
374  *   now use the debug registers.
375  */
376 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
377 			    struct sys_reg_params *p,
378 			    const struct sys_reg_desc *r)
379 {
380 	if (p->is_write) {
381 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
382 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
383 	} else {
384 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
385 	}
386 
387 	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
388 
389 	return true;
390 }
391 
392 /*
393  * reg_to_dbg/dbg_to_reg
394  *
395  * A 32-bit write to a debug register leaves the top bits alone.
396  * A 32-bit read from a debug register only returns the bottom bits.
397  *
398  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
399  * hyp.S code switches between host and guest values in the future.
400  */
401 static void reg_to_dbg(struct kvm_vcpu *vcpu,
402 		       struct sys_reg_params *p,
403 		       u64 *dbg_reg)
404 {
405 	u64 val = p->regval;
406 
407 	if (p->is_32bit) {
408 		val &= 0xffffffffUL;
409 		val |= ((*dbg_reg >> 32) << 32);
410 	}
411 
412 	*dbg_reg = val;
413 	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
414 }
415 
416 static void dbg_to_reg(struct kvm_vcpu *vcpu,
417 		       struct sys_reg_params *p,
418 		       u64 *dbg_reg)
419 {
420 	p->regval = *dbg_reg;
421 	if (p->is_32bit)
422 		p->regval &= 0xffffffffUL;
423 }
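
/*
 * Worked example (illustrative): with *dbg_reg == 0xffffffff_aaaaaaaa,
 * a 32-bit write of 0x1234 through reg_to_dbg() leaves the register as
 * 0xffffffff_00001234 (top half preserved), while a 32-bit read through
 * dbg_to_reg() returns just 0xaaaaaaaa.
 */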
424 
425 static bool trap_bvr(struct kvm_vcpu *vcpu,
426 		     struct sys_reg_params *p,
427 		     const struct sys_reg_desc *rd)
428 {
429 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
430 
431 	if (p->is_write)
432 		reg_to_dbg(vcpu, p, dbg_reg);
433 	else
434 		dbg_to_reg(vcpu, p, dbg_reg);
435 
436 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
437 
438 	return true;
439 }
440 
441 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
442 		const struct kvm_one_reg *reg, void __user *uaddr)
443 {
444 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
445 
446 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
447 		return -EFAULT;
448 	return 0;
449 }
450 
451 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
452 	const struct kvm_one_reg *reg, void __user *uaddr)
453 {
454 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
455 
456 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
457 		return -EFAULT;
458 	return 0;
459 }
460 
461 static void reset_bvr(struct kvm_vcpu *vcpu,
462 		      const struct sys_reg_desc *rd)
463 {
464 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
465 }
466 
467 static bool trap_bcr(struct kvm_vcpu *vcpu,
468 		     struct sys_reg_params *p,
469 		     const struct sys_reg_desc *rd)
470 {
471 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
472 
473 	if (p->is_write)
474 		reg_to_dbg(vcpu, p, dbg_reg);
475 	else
476 		dbg_to_reg(vcpu, p, dbg_reg);
477 
478 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
479 
480 	return true;
481 }
482 
483 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
484 		const struct kvm_one_reg *reg, void __user *uaddr)
485 {
486 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
487 
488 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
489 		return -EFAULT;
490 
491 	return 0;
492 }
493 
494 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
495 	const struct kvm_one_reg *reg, void __user *uaddr)
496 {
497 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
498 
499 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
500 		return -EFAULT;
501 	return 0;
502 }
503 
504 static void reset_bcr(struct kvm_vcpu *vcpu,
505 		      const struct sys_reg_desc *rd)
506 {
507 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
508 }
509 
510 static bool trap_wvr(struct kvm_vcpu *vcpu,
511 		     struct sys_reg_params *p,
512 		     const struct sys_reg_desc *rd)
513 {
514 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
515 
516 	if (p->is_write)
517 		reg_to_dbg(vcpu, p, dbg_reg);
518 	else
519 		dbg_to_reg(vcpu, p, dbg_reg);
520 
521 	trace_trap_reg(__func__, rd->reg, p->is_write,
522 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
523 
524 	return true;
525 }
526 
527 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
528 		const struct kvm_one_reg *reg, void __user *uaddr)
529 {
530 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
531 
532 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
533 		return -EFAULT;
534 	return 0;
535 }
536 
537 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
538 	const struct kvm_one_reg *reg, void __user *uaddr)
539 {
540 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
541 
542 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
543 		return -EFAULT;
544 	return 0;
545 }
546 
547 static void reset_wvr(struct kvm_vcpu *vcpu,
548 		      const struct sys_reg_desc *rd)
549 {
550 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
551 }
552 
553 static bool trap_wcr(struct kvm_vcpu *vcpu,
554 		     struct sys_reg_params *p,
555 		     const struct sys_reg_desc *rd)
556 {
557 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
558 
559 	if (p->is_write)
560 		reg_to_dbg(vcpu, p, dbg_reg);
561 	else
562 		dbg_to_reg(vcpu, p, dbg_reg);
563 
564 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
565 
566 	return true;
567 }
568 
569 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
570 		const struct kvm_one_reg *reg, void __user *uaddr)
571 {
572 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
573 
574 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
575 		return -EFAULT;
576 	return 0;
577 }
578 
579 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
580 	const struct kvm_one_reg *reg, void __user *uaddr)
581 {
582 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
583 
584 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
585 		return -EFAULT;
586 	return 0;
587 }
588 
589 static void reset_wcr(struct kvm_vcpu *vcpu,
590 		      const struct sys_reg_desc *rd)
591 {
592 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
593 }
594 
595 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
596 {
597 	u64 amair = read_sysreg(amair_el1);
598 	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
599 }
600 
601 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
602 {
603 	u64 mpidr;
604 
605 	/*
606 	 * Map the vcpu_id into the first three affinity level fields of
607 	 * the MPIDR. We limit the number of VCPUs in level 0 to 16, because
608 	 * the ICC_SGIxR registers of the GICv3 can only address 16 CPUs
609 	 * within that affinity level when sending IPIs directly to each
610 	 * CPU.
611 	 */
612 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
613 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
614 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
615 	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
616 }
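
/*
 * Worked example (illustrative): vcpu_id 20 (0x14) maps to Aff0 = 4,
 * Aff1 = 1, Aff2 = 0, so the guest reads MPIDR_EL1 == 0x80000104
 * (bit 31 being RES1).
 */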
617 
618 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
619 {
620 	u64 pmcr, val;
621 
622 	pmcr = read_sysreg(pmcr_el0);
623 	/*
624 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
625 	 * except PMCR.E, which resets to zero.
626 	 */
627 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
628 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
629 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
630 }
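
/*
 * Note (illustrative): 0xdecafbad is simply an arbitrary pattern used to
 * fill the writable, UNKNOWN-on-reset bits; the read-only PMCR_EL0.N
 * field is inherited from the host's PMCR value, and PMCR_EL0.E is
 * forced to zero.
 */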
631 
632 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
633 {
634 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
635 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
636 
637 	if (!enabled)
638 		kvm_inject_undefined(vcpu);
639 
640 	return !enabled;
641 }
642 
643 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
644 {
645 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
646 }
647 
648 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
649 {
650 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
651 }
652 
653 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
654 {
655 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
656 }
657 
658 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
659 {
660 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
661 }
662 
663 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
664 			const struct sys_reg_desc *r)
665 {
666 	u64 val;
667 
668 	if (!kvm_arm_pmu_v3_ready(vcpu))
669 		return trap_raz_wi(vcpu, p, r);
670 
671 	if (pmu_access_el0_disabled(vcpu))
672 		return false;
673 
674 	if (p->is_write) {
675 		/* Only update writable bits of PMCR */
676 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
677 		val &= ~ARMV8_PMU_PMCR_MASK;
678 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
679 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
680 		kvm_pmu_handle_pmcr(vcpu, val);
681 	} else {
682 		/* PMCR.P & PMCR.C are RAZ */
683 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
684 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
685 		p->regval = val;
686 	}
687 
688 	return true;
689 }
690 
691 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
692 			  const struct sys_reg_desc *r)
693 {
694 	if (!kvm_arm_pmu_v3_ready(vcpu))
695 		return trap_raz_wi(vcpu, p, r);
696 
697 	if (pmu_access_event_counter_el0_disabled(vcpu))
698 		return false;
699 
700 	if (p->is_write)
701 		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
702 	else
703 		/* return PMSELR.SEL field */
704 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
705 			    & ARMV8_PMU_COUNTER_MASK;
706 
707 	return true;
708 }
709 
710 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
711 			  const struct sys_reg_desc *r)
712 {
713 	u64 pmceid;
714 
715 	if (!kvm_arm_pmu_v3_ready(vcpu))
716 		return trap_raz_wi(vcpu, p, r);
717 
718 	BUG_ON(p->is_write);
719 
720 	if (pmu_access_el0_disabled(vcpu))
721 		return false;
722 
723 	if (!(p->Op2 & 1))
724 		pmceid = read_sysreg(pmceid0_el0);
725 	else
726 		pmceid = read_sysreg(pmceid1_el0);
727 
728 	p->regval = pmceid;
729 
730 	return true;
731 }
732 
733 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
734 {
735 	u64 pmcr, val;
736 
737 	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
738 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
739 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
740 		kvm_inject_undefined(vcpu);
741 		return false;
742 	}
743 
744 	return true;
745 }
746 
747 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
748 			      struct sys_reg_params *p,
749 			      const struct sys_reg_desc *r)
750 {
751 	u64 idx;
752 
753 	if (!kvm_arm_pmu_v3_ready(vcpu))
754 		return trap_raz_wi(vcpu, p, r);
755 
756 	if (r->CRn == 9 && r->CRm == 13) {
757 		if (r->Op2 == 2) {
758 			/* PMXEVCNTR_EL0 */
759 			if (pmu_access_event_counter_el0_disabled(vcpu))
760 				return false;
761 
762 			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
763 			      & ARMV8_PMU_COUNTER_MASK;
764 		} else if (r->Op2 == 0) {
765 			/* PMCCNTR_EL0 */
766 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
767 				return false;
768 
769 			idx = ARMV8_PMU_CYCLE_IDX;
770 		} else {
771 			return false;
772 		}
773 	} else if (r->CRn == 0 && r->CRm == 9) {
774 		/* PMCCNTR */
775 		if (pmu_access_event_counter_el0_disabled(vcpu))
776 			return false;
777 
778 		idx = ARMV8_PMU_CYCLE_IDX;
779 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
780 		/* PMEVCNTRn_EL0 */
781 		if (pmu_access_event_counter_el0_disabled(vcpu))
782 			return false;
783 
784 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
785 	} else {
786 		return false;
787 	}
788 
789 	if (!pmu_counter_idx_valid(vcpu, idx))
790 		return false;
791 
792 	if (p->is_write) {
793 		if (pmu_access_el0_disabled(vcpu))
794 			return false;
795 
796 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
797 	} else {
798 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
799 	}
800 
801 	return true;
802 }
803 
804 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
805 			       const struct sys_reg_desc *r)
806 {
807 	u64 idx, reg;
808 
809 	if (!kvm_arm_pmu_v3_ready(vcpu))
810 		return trap_raz_wi(vcpu, p, r);
811 
812 	if (pmu_access_el0_disabled(vcpu))
813 		return false;
814 
815 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
816 		/* PMXEVTYPER_EL0 */
817 		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
818 		reg = PMEVTYPER0_EL0 + idx;
819 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
820 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
821 		if (idx == ARMV8_PMU_CYCLE_IDX)
822 			reg = PMCCFILTR_EL0;
823 		else
824 			/* PMEVTYPERn_EL0 */
825 			reg = PMEVTYPER0_EL0 + idx;
826 	} else {
827 		BUG();
828 	}
829 
830 	if (!pmu_counter_idx_valid(vcpu, idx))
831 		return false;
832 
833 	if (p->is_write) {
834 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
835 		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
836 	} else {
837 		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
838 	}
839 
840 	return true;
841 }
842 
843 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
844 			   const struct sys_reg_desc *r)
845 {
846 	u64 val, mask;
847 
848 	if (!kvm_arm_pmu_v3_ready(vcpu))
849 		return trap_raz_wi(vcpu, p, r);
850 
851 	if (pmu_access_el0_disabled(vcpu))
852 		return false;
853 
854 	mask = kvm_pmu_valid_counter_mask(vcpu);
855 	if (p->is_write) {
856 		val = p->regval & mask;
857 		if (r->Op2 & 0x1) {
858 			/* accessing PMCNTENSET_EL0 */
859 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
860 			kvm_pmu_enable_counter(vcpu, val);
861 		} else {
862 			/* accessing PMCNTENCLR_EL0 */
863 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
864 			kvm_pmu_disable_counter(vcpu, val);
865 		}
866 	} else {
867 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
868 	}
869 
870 	return true;
871 }
872 
873 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
874 			   const struct sys_reg_desc *r)
875 {
876 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
877 
878 	if (!kvm_arm_pmu_v3_ready(vcpu))
879 		return trap_raz_wi(vcpu, p, r);
880 
881 	if (!vcpu_mode_priv(vcpu)) {
882 		kvm_inject_undefined(vcpu);
883 		return false;
884 	}
885 
886 	if (p->is_write) {
887 		u64 val = p->regval & mask;
888 
889 		if (r->Op2 & 0x1)
890 			/* accessing PMINTENSET_EL1 */
891 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
892 		else
893 			/* accessing PMINTENCLR_EL1 */
894 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
895 	} else {
896 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
897 	}
898 
899 	return true;
900 }
901 
902 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
903 			 const struct sys_reg_desc *r)
904 {
905 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
906 
907 	if (!kvm_arm_pmu_v3_ready(vcpu))
908 		return trap_raz_wi(vcpu, p, r);
909 
910 	if (pmu_access_el0_disabled(vcpu))
911 		return false;
912 
913 	if (p->is_write) {
914 		if (r->CRm & 0x2)
915 			/* accessing PMOVSSET_EL0 */
916 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
917 		else
918 			/* accessing PMOVSCLR_EL0 */
919 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
920 	} else {
921 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
922 	}
923 
924 	return true;
925 }
926 
927 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
928 			   const struct sys_reg_desc *r)
929 {
930 	u64 mask;
931 
932 	if (!kvm_arm_pmu_v3_ready(vcpu))
933 		return trap_raz_wi(vcpu, p, r);
934 
935 	if (!p->is_write)
936 		return read_from_write_only(vcpu, p, r);
937 
938 	if (pmu_write_swinc_el0_disabled(vcpu))
939 		return false;
940 
941 	mask = kvm_pmu_valid_counter_mask(vcpu);
942 	kvm_pmu_software_increment(vcpu, p->regval & mask);
943 	return true;
944 }
945 
946 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
947 			     const struct sys_reg_desc *r)
948 {
949 	if (!kvm_arm_pmu_v3_ready(vcpu))
950 		return trap_raz_wi(vcpu, p, r);
951 
952 	if (p->is_write) {
953 		if (!vcpu_mode_priv(vcpu)) {
954 			kvm_inject_undefined(vcpu);
955 			return false;
956 		}
957 
958 		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
959 			       p->regval & ARMV8_PMU_USERENR_MASK;
960 	} else {
961 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
962 			    & ARMV8_PMU_USERENR_MASK;
963 	}
964 
965 	return true;
966 }
967 
968 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
969 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
970 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
971 	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
972 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
973 	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
974 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
975 	  trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },		\
976 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
977 	  trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
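
/*
 * Illustrative expansion (assumption, for documentation only):
 * DBG_BCR_BVR_WCR_WVR_EL1(1) emits four sys_reg_desc entries, e.g.
 *
 *	{ SYS_DESC(SYS_DBGBVRn_EL1(1)),
 *	  trap_bvr, reset_bvr, 1, 0, get_bvr, set_bvr },
 *
 * and likewise for DBGBCR1_EL1, DBGWVR1_EL1 and DBGWCR1_EL1, each
 * carrying the index in ->reg so the trap handlers pick the right
 * slot in vcpu_debug_state.
 */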
978 
979 /* Macro to expand the PMEVCNTRn_EL0 register */
980 #define PMU_PMEVCNTR_EL0(n)						\
981 	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),					\
982 	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
983 
984 /* Macro to expand the PMEVTYPERn_EL0 register */
985 #define PMU_PMEVTYPER_EL0(n)						\
986 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
987 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
988 
989 static bool access_cntp_tval(struct kvm_vcpu *vcpu,
990 		struct sys_reg_params *p,
991 		const struct sys_reg_desc *r)
992 {
993 	u64 now = kvm_phys_timer_read();
994 	u64 cval;
995 
996 	if (p->is_write) {
997 		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
998 				      p->regval + now);
999 	} else {
1000 		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
1001 		p->regval = cval - now;
1002 	}
1003 
1004 	return true;
1005 }
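
/*
 * Note (illustrative): TVAL is the signed distance between the compare
 * value and the current physical count, so writing TVAL = 1000 arms the
 * timer at CVAL = now + 1000, and reading it back just before expiry
 * returns a value close to zero.
 */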
1006 
1007 static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
1008 		struct sys_reg_params *p,
1009 		const struct sys_reg_desc *r)
1010 {
1011 	if (p->is_write)
1012 		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
1013 	else
1014 		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
1015 
1016 	return true;
1017 }
1018 
1019 static bool access_cntp_cval(struct kvm_vcpu *vcpu,
1020 		struct sys_reg_params *p,
1021 		const struct sys_reg_desc *r)
1022 {
1023 	if (p->is_write)
1024 		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
1025 	else
1026 		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
1027 
1028 	return true;
1029 }
1030 
1031 /* Read a sanitised cpufeature ID register by sys_reg_desc */
1032 static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
1033 {
1034 	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
1035 			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
1036 	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
1037 
1038 	if (id == SYS_ID_AA64PFR0_EL1) {
1039 		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
1040 			kvm_debug("SVE unsupported for guests, suppressing\n");
1041 
1042 		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
1043 	} else if (id == SYS_ID_AA64MMFR1_EL1) {
1044 		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1045 			kvm_debug("LORegions unsupported for guests, suppressing\n");
1046 
1047 		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1048 	}
1049 
1050 	return val;
1051 }
1052 
1053 /* cpufeature ID register access trap handlers */
1054 
1055 static bool __access_id_reg(struct kvm_vcpu *vcpu,
1056 			    struct sys_reg_params *p,
1057 			    const struct sys_reg_desc *r,
1058 			    bool raz)
1059 {
1060 	if (p->is_write)
1061 		return write_to_read_only(vcpu, p, r);
1062 
1063 	p->regval = read_id_reg(r, raz);
1064 	return true;
1065 }
1066 
1067 static bool access_id_reg(struct kvm_vcpu *vcpu,
1068 			  struct sys_reg_params *p,
1069 			  const struct sys_reg_desc *r)
1070 {
1071 	return __access_id_reg(vcpu, p, r, false);
1072 }
1073 
1074 static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
1075 			      struct sys_reg_params *p,
1076 			      const struct sys_reg_desc *r)
1077 {
1078 	return __access_id_reg(vcpu, p, r, true);
1079 }
1080 
1081 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
1082 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
1083 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1084 
1085 /*
1086  * cpufeature ID register user accessors
1087  *
1088  * For now, these registers are immutable for userspace, so no values
1089  * are stored, and for set_id_reg() we don't allow the effective value
1090  * to be changed.
1091  */
1092 static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
1093 			bool raz)
1094 {
1095 	const u64 id = sys_reg_to_index(rd);
1096 	const u64 val = read_id_reg(rd, raz);
1097 
1098 	return reg_to_user(uaddr, &val, id);
1099 }
1100 
1101 static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
1102 			bool raz)
1103 {
1104 	const u64 id = sys_reg_to_index(rd);
1105 	int err;
1106 	u64 val;
1107 
1108 	err = reg_from_user(&val, uaddr, id);
1109 	if (err)
1110 		return err;
1111 
1112 	/* This is what we mean by invariant: you can't change it. */
1113 	if (val != read_id_reg(rd, raz))
1114 		return -EINVAL;
1115 
1116 	return 0;
1117 }
1118 
1119 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1120 		      const struct kvm_one_reg *reg, void __user *uaddr)
1121 {
1122 	return __get_id_reg(rd, uaddr, false);
1123 }
1124 
1125 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1126 		      const struct kvm_one_reg *reg, void __user *uaddr)
1127 {
1128 	return __set_id_reg(rd, uaddr, false);
1129 }
1130 
1131 static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1132 			  const struct kvm_one_reg *reg, void __user *uaddr)
1133 {
1134 	return __get_id_reg(rd, uaddr, true);
1135 }
1136 
1137 static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1138 			  const struct kvm_one_reg *reg, void __user *uaddr)
1139 {
1140 	return __set_id_reg(rd, uaddr, true);
1141 }
1142 
1143 /* sys_reg_desc initialiser for known cpufeature ID registers */
1144 #define ID_SANITISED(name) {			\
1145 	SYS_DESC(SYS_##name),			\
1146 	.access	= access_id_reg,		\
1147 	.get_user = get_id_reg,			\
1148 	.set_user = set_id_reg,			\
1149 }
1150 
1151 /*
1152  * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
1153  * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
1154  * (1 <= crm < 8, 0 <= Op2 < 8).
1155  */
1156 #define ID_UNALLOCATED(crm, op2) {			\
1157 	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
1158 	.access = access_raz_id_reg,			\
1159 	.get_user = get_raz_id_reg,			\
1160 	.set_user = set_raz_id_reg,			\
1161 }
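
/*
 * Example (illustrative): ID_UNALLOCATED(4,2) covers the reserved
 * encoding Op0=3, Op1=0, CRn=0, CRm=4, Op2=2; the guest reads it as
 * zero, and userspace may only write the value zero back through
 * set_raz_id_reg().
 */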
1162 
1163 /*
1164  * sys_reg_desc initialiser for known ID registers that we hide from guests.
1165  * For now, these are exposed just like unallocated ID regs: they appear
1166  * RAZ for the guest.
1167  */
1168 #define ID_HIDDEN(name) {			\
1169 	SYS_DESC(SYS_##name),			\
1170 	.access = access_raz_id_reg,		\
1171 	.get_user = get_raz_id_reg,		\
1172 	.set_user = set_raz_id_reg,		\
1173 }
1174 
1175 /*
1176  * Architected system registers.
1177  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
1178  *
1179  * Debug handling: We do trap most, if not all, debug-related system
1180  * registers. The implementation is good enough to ensure that a guest
1181  * can use these with minimal performance degradation. The drawback is
1182  * that we don't implement any of the external debug architecture, nor
1183  * the OSLock protocol. This should be revisited if we ever encounter a
1184  * more demanding guest...
1185  */
1186 static const struct sys_reg_desc sys_reg_descs[] = {
1187 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
1188 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
1189 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
1190 
1191 	DBG_BCR_BVR_WCR_WVR_EL1(0),
1192 	DBG_BCR_BVR_WCR_WVR_EL1(1),
1193 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1194 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
1195 	DBG_BCR_BVR_WCR_WVR_EL1(2),
1196 	DBG_BCR_BVR_WCR_WVR_EL1(3),
1197 	DBG_BCR_BVR_WCR_WVR_EL1(4),
1198 	DBG_BCR_BVR_WCR_WVR_EL1(5),
1199 	DBG_BCR_BVR_WCR_WVR_EL1(6),
1200 	DBG_BCR_BVR_WCR_WVR_EL1(7),
1201 	DBG_BCR_BVR_WCR_WVR_EL1(8),
1202 	DBG_BCR_BVR_WCR_WVR_EL1(9),
1203 	DBG_BCR_BVR_WCR_WVR_EL1(10),
1204 	DBG_BCR_BVR_WCR_WVR_EL1(11),
1205 	DBG_BCR_BVR_WCR_WVR_EL1(12),
1206 	DBG_BCR_BVR_WCR_WVR_EL1(13),
1207 	DBG_BCR_BVR_WCR_WVR_EL1(14),
1208 	DBG_BCR_BVR_WCR_WVR_EL1(15),
1209 
1210 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1211 	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
1212 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
1213 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1214 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1215 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1216 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1217 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
1218 
1219 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1220 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1221 	// DBGDTR[TR]X_EL0 share the same encoding
1222 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
1223 
1224 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
1225 
1226 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
1227 
1228 	/*
1229 	 * ID regs: all ID_SANITISED() entries here must have corresponding
1230 	 * entries in arm64_ftr_regs[].
1231 	 */
1232 
1233 	/* AArch64 mappings of the AArch32 ID registers */
1234 	/* CRm=1 */
1235 	ID_SANITISED(ID_PFR0_EL1),
1236 	ID_SANITISED(ID_PFR1_EL1),
1237 	ID_SANITISED(ID_DFR0_EL1),
1238 	ID_HIDDEN(ID_AFR0_EL1),
1239 	ID_SANITISED(ID_MMFR0_EL1),
1240 	ID_SANITISED(ID_MMFR1_EL1),
1241 	ID_SANITISED(ID_MMFR2_EL1),
1242 	ID_SANITISED(ID_MMFR3_EL1),
1243 
1244 	/* CRm=2 */
1245 	ID_SANITISED(ID_ISAR0_EL1),
1246 	ID_SANITISED(ID_ISAR1_EL1),
1247 	ID_SANITISED(ID_ISAR2_EL1),
1248 	ID_SANITISED(ID_ISAR3_EL1),
1249 	ID_SANITISED(ID_ISAR4_EL1),
1250 	ID_SANITISED(ID_ISAR5_EL1),
1251 	ID_SANITISED(ID_MMFR4_EL1),
1252 	ID_UNALLOCATED(2,7),
1253 
1254 	/* CRm=3 */
1255 	ID_SANITISED(MVFR0_EL1),
1256 	ID_SANITISED(MVFR1_EL1),
1257 	ID_SANITISED(MVFR2_EL1),
1258 	ID_UNALLOCATED(3,3),
1259 	ID_UNALLOCATED(3,4),
1260 	ID_UNALLOCATED(3,5),
1261 	ID_UNALLOCATED(3,6),
1262 	ID_UNALLOCATED(3,7),
1263 
1264 	/* AArch64 ID registers */
1265 	/* CRm=4 */
1266 	ID_SANITISED(ID_AA64PFR0_EL1),
1267 	ID_SANITISED(ID_AA64PFR1_EL1),
1268 	ID_UNALLOCATED(4,2),
1269 	ID_UNALLOCATED(4,3),
1270 	ID_UNALLOCATED(4,4),
1271 	ID_UNALLOCATED(4,5),
1272 	ID_UNALLOCATED(4,6),
1273 	ID_UNALLOCATED(4,7),
1274 
1275 	/* CRm=5 */
1276 	ID_SANITISED(ID_AA64DFR0_EL1),
1277 	ID_SANITISED(ID_AA64DFR1_EL1),
1278 	ID_UNALLOCATED(5,2),
1279 	ID_UNALLOCATED(5,3),
1280 	ID_HIDDEN(ID_AA64AFR0_EL1),
1281 	ID_HIDDEN(ID_AA64AFR1_EL1),
1282 	ID_UNALLOCATED(5,6),
1283 	ID_UNALLOCATED(5,7),
1284 
1285 	/* CRm=6 */
1286 	ID_SANITISED(ID_AA64ISAR0_EL1),
1287 	ID_SANITISED(ID_AA64ISAR1_EL1),
1288 	ID_UNALLOCATED(6,2),
1289 	ID_UNALLOCATED(6,3),
1290 	ID_UNALLOCATED(6,4),
1291 	ID_UNALLOCATED(6,5),
1292 	ID_UNALLOCATED(6,6),
1293 	ID_UNALLOCATED(6,7),
1294 
1295 	/* CRm=7 */
1296 	ID_SANITISED(ID_AA64MMFR0_EL1),
1297 	ID_SANITISED(ID_AA64MMFR1_EL1),
1298 	ID_SANITISED(ID_AA64MMFR2_EL1),
1299 	ID_UNALLOCATED(7,3),
1300 	ID_UNALLOCATED(7,4),
1301 	ID_UNALLOCATED(7,5),
1302 	ID_UNALLOCATED(7,6),
1303 	ID_UNALLOCATED(7,7),
1304 
1305 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1306 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
1307 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1308 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1309 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1310 
1311 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1312 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1313 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1314 
1315 	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1316 	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1317 	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1318 	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1319 	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1320 	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1321 	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1322 	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1323 
1324 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1325 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
1326 
1327 	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
1328 	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
1329 
1330 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1331 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1332 
1333 	{ SYS_DESC(SYS_LORSA_EL1), trap_undef },
1334 	{ SYS_DESC(SYS_LOREA_EL1), trap_undef },
1335 	{ SYS_DESC(SYS_LORN_EL1), trap_undef },
1336 	{ SYS_DESC(SYS_LORC_EL1), trap_undef },
1337 	{ SYS_DESC(SYS_LORID_EL1), trap_undef },
1338 
1339 	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1340 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1341 
1342 	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1343 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1344 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1345 	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1346 	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1347 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1348 	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1349 	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1350 	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1351 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1352 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1353 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1354 
1355 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1356 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1357 
1358 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
1359 
1360 	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
1361 
1362 	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
1363 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1364 	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
1365 	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
1366 	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
1367 	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
1368 	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
1369 	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
1370 	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
1371 	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
1372 	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
1373 	/*
1374 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
1375 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
1376 	 */
1377 	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1378 	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1379 
1380 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1381 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1382 
1383 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
1384 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
1385 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
1386 
1387 	/* PMEVCNTRn_EL0 */
1388 	PMU_PMEVCNTR_EL0(0),
1389 	PMU_PMEVCNTR_EL0(1),
1390 	PMU_PMEVCNTR_EL0(2),
1391 	PMU_PMEVCNTR_EL0(3),
1392 	PMU_PMEVCNTR_EL0(4),
1393 	PMU_PMEVCNTR_EL0(5),
1394 	PMU_PMEVCNTR_EL0(6),
1395 	PMU_PMEVCNTR_EL0(7),
1396 	PMU_PMEVCNTR_EL0(8),
1397 	PMU_PMEVCNTR_EL0(9),
1398 	PMU_PMEVCNTR_EL0(10),
1399 	PMU_PMEVCNTR_EL0(11),
1400 	PMU_PMEVCNTR_EL0(12),
1401 	PMU_PMEVCNTR_EL0(13),
1402 	PMU_PMEVCNTR_EL0(14),
1403 	PMU_PMEVCNTR_EL0(15),
1404 	PMU_PMEVCNTR_EL0(16),
1405 	PMU_PMEVCNTR_EL0(17),
1406 	PMU_PMEVCNTR_EL0(18),
1407 	PMU_PMEVCNTR_EL0(19),
1408 	PMU_PMEVCNTR_EL0(20),
1409 	PMU_PMEVCNTR_EL0(21),
1410 	PMU_PMEVCNTR_EL0(22),
1411 	PMU_PMEVCNTR_EL0(23),
1412 	PMU_PMEVCNTR_EL0(24),
1413 	PMU_PMEVCNTR_EL0(25),
1414 	PMU_PMEVCNTR_EL0(26),
1415 	PMU_PMEVCNTR_EL0(27),
1416 	PMU_PMEVCNTR_EL0(28),
1417 	PMU_PMEVCNTR_EL0(29),
1418 	PMU_PMEVCNTR_EL0(30),
1419 	/* PMEVTYPERn_EL0 */
1420 	PMU_PMEVTYPER_EL0(0),
1421 	PMU_PMEVTYPER_EL0(1),
1422 	PMU_PMEVTYPER_EL0(2),
1423 	PMU_PMEVTYPER_EL0(3),
1424 	PMU_PMEVTYPER_EL0(4),
1425 	PMU_PMEVTYPER_EL0(5),
1426 	PMU_PMEVTYPER_EL0(6),
1427 	PMU_PMEVTYPER_EL0(7),
1428 	PMU_PMEVTYPER_EL0(8),
1429 	PMU_PMEVTYPER_EL0(9),
1430 	PMU_PMEVTYPER_EL0(10),
1431 	PMU_PMEVTYPER_EL0(11),
1432 	PMU_PMEVTYPER_EL0(12),
1433 	PMU_PMEVTYPER_EL0(13),
1434 	PMU_PMEVTYPER_EL0(14),
1435 	PMU_PMEVTYPER_EL0(15),
1436 	PMU_PMEVTYPER_EL0(16),
1437 	PMU_PMEVTYPER_EL0(17),
1438 	PMU_PMEVTYPER_EL0(18),
1439 	PMU_PMEVTYPER_EL0(19),
1440 	PMU_PMEVTYPER_EL0(20),
1441 	PMU_PMEVTYPER_EL0(21),
1442 	PMU_PMEVTYPER_EL0(22),
1443 	PMU_PMEVTYPER_EL0(23),
1444 	PMU_PMEVTYPER_EL0(24),
1445 	PMU_PMEVTYPER_EL0(25),
1446 	PMU_PMEVTYPER_EL0(26),
1447 	PMU_PMEVTYPER_EL0(27),
1448 	PMU_PMEVTYPER_EL0(28),
1449 	PMU_PMEVTYPER_EL0(29),
1450 	PMU_PMEVTYPER_EL0(30),
1451 	/*
1452 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
1453 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
1454 	 */
1455 	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1456 
1457 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1458 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1459 	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
1460 };
1461 
1462 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1463 			struct sys_reg_params *p,
1464 			const struct sys_reg_desc *r)
1465 {
1466 	if (p->is_write) {
1467 		return ignore_write(vcpu, p);
1468 	} else {
1469 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1470 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1471 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1472 
1473 		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1474 			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1475 			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1476 			     | (6 << 16) | (el3 << 14) | (el3 << 12));
1477 		return true;
1478 	}
1479 }
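
/*
 * Field breakdown (illustrative, assuming the AArch32 DBGDIDR layout):
 * WRPs in bits [31:28] and BRPs in [27:24] come from ID_AA64DFR0_EL1,
 * CTX_CMPs sits in [23:20], the constant 6 in [19:16] advertises the
 * ARMv8 debug architecture version, and bits 14/12 are set only when
 * EL3 is implemented.
 */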
1480 
1481 static bool trap_debug32(struct kvm_vcpu *vcpu,
1482 			 struct sys_reg_params *p,
1483 			 const struct sys_reg_desc *r)
1484 {
1485 	if (p->is_write) {
1486 		vcpu_cp14(vcpu, r->reg) = p->regval;
1487 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1488 	} else {
1489 		p->regval = vcpu_cp14(vcpu, r->reg);
1490 	}
1491 
1492 	return true;
1493 }
1494 
1495 /* AArch32 debug register mappings
1496  *
1497  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
1498  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
1499  *
1500  * All control registers and watchpoint value registers are mapped to
1501  * the lower 32 bits of their AArch64 equivalents. We share the trap
1502  * handlers with the above AArch64 code which checks what mode the
1503  * system is in.
1504  */
1505 
1506 static bool trap_xvr(struct kvm_vcpu *vcpu,
1507 		     struct sys_reg_params *p,
1508 		     const struct sys_reg_desc *rd)
1509 {
1510 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1511 
1512 	if (p->is_write) {
1513 		u64 val = *dbg_reg;
1514 
1515 		val &= 0xffffffffUL;
1516 		val |= p->regval << 32;
1517 		*dbg_reg = val;
1518 
1519 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1520 	} else {
1521 		p->regval = *dbg_reg >> 32;
1522 	}
1523 
1524 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1525 
1526 	return true;
1527 }
1528 
1529 #define DBG_BCR_BVR_WCR_WVR(n)						\
1530 	/* DBGBVRn */							\
1531 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
1532 	/* DBGBCRn */							\
1533 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
1534 	/* DBGWVRn */							\
1535 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
1536 	/* DBGWCRn */							\
1537 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1538 
1539 #define DBGBXVR(n)							\
1540 	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
1541 
1542 /*
1543  * Trapped cp14 registers. We generally ignore most of the external
1544  * debug, on the principle that they don't really make sense to a
1545  * guest. Revisit this one day, should this principle ever change.
1546  */
1547 static const struct sys_reg_desc cp14_regs[] = {
1548 	/* DBGIDR */
1549 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1550 	/* DBGDTRRXext */
1551 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1552 
1553 	DBG_BCR_BVR_WCR_WVR(0),
1554 	/* DBGDSCRint */
1555 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1556 	DBG_BCR_BVR_WCR_WVR(1),
1557 	/* DBGDCCINT */
1558 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1559 	/* DBGDSCRext */
1560 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1561 	DBG_BCR_BVR_WCR_WVR(2),
1562 	/* DBGDTR[RT]Xint */
1563 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1564 	/* DBGDTR[RT]Xext */
1565 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1566 	DBG_BCR_BVR_WCR_WVR(3),
1567 	DBG_BCR_BVR_WCR_WVR(4),
1568 	DBG_BCR_BVR_WCR_WVR(5),
1569 	/* DBGWFAR */
1570 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1571 	/* DBGOSECCR */
1572 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1573 	DBG_BCR_BVR_WCR_WVR(6),
1574 	/* DBGVCR */
1575 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1576 	DBG_BCR_BVR_WCR_WVR(7),
1577 	DBG_BCR_BVR_WCR_WVR(8),
1578 	DBG_BCR_BVR_WCR_WVR(9),
1579 	DBG_BCR_BVR_WCR_WVR(10),
1580 	DBG_BCR_BVR_WCR_WVR(11),
1581 	DBG_BCR_BVR_WCR_WVR(12),
1582 	DBG_BCR_BVR_WCR_WVR(13),
1583 	DBG_BCR_BVR_WCR_WVR(14),
1584 	DBG_BCR_BVR_WCR_WVR(15),
1585 
1586 	/* DBGDRAR (32bit) */
1587 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1588 
1589 	DBGBXVR(0),
1590 	/* DBGOSLAR */
1591 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1592 	DBGBXVR(1),
1593 	/* DBGOSLSR */
1594 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1595 	DBGBXVR(2),
1596 	DBGBXVR(3),
1597 	/* DBGOSDLR */
1598 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1599 	DBGBXVR(4),
1600 	/* DBGPRCR */
1601 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1602 	DBGBXVR(5),
1603 	DBGBXVR(6),
1604 	DBGBXVR(7),
1605 	DBGBXVR(8),
1606 	DBGBXVR(9),
1607 	DBGBXVR(10),
1608 	DBGBXVR(11),
1609 	DBGBXVR(12),
1610 	DBGBXVR(13),
1611 	DBGBXVR(14),
1612 	DBGBXVR(15),
1613 
1614 	/* DBGDSAR (32bit) */
1615 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1616 
1617 	/* DBGDEVID2 */
1618 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1619 	/* DBGDEVID1 */
1620 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1621 	/* DBGDEVID */
1622 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1623 	/* DBGCLAIMSET */
1624 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1625 	/* DBGCLAIMCLR */
1626 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1627 	/* DBGAUTHSTATUS */
1628 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1629 };
1630 
1631 /* Trapped cp14 64bit registers */
1632 static const struct sys_reg_desc cp14_64_regs[] = {
1633 	/* DBGDRAR (64bit) */
1634 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
1635 
1636 	/* DBGDSAR (64bit) */
1637 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
1638 };
1639 
1640 /* Macro to expand the PMEVCNTRn register */
1641 #define PMU_PMEVCNTR(n)							\
1642 	/* PMEVCNTRn */							\
1643 	{ Op1(0), CRn(0b1110),						\
1644 	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1645 	  access_pmu_evcntr }
1646 
1647 /* Macro to expand the PMEVTYPERn register */
1648 #define PMU_PMEVTYPER(n)						\
1649 	/* PMEVTYPERn */						\
1650 	{ Op1(0), CRn(0b1110),						\
1651 	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
1652 	  access_pmu_evtyper }
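/*
 * For example, PMU_PMEVCNTR(13) expands to CRn(0b1110), CRm(0b1001),
 * Op2(5), i.e. the cp15 encoding of PMEVCNTR13; PMU_PMEVTYPER maps the
 * event type registers the same way, with CRm starting at 0b1100.
 */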
1653 
1654 /*
1655  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
1656  * depending on the way they are accessed (as a 32bit or a 64bit
1657  * register).
1658  */
1659 static const struct sys_reg_desc cp15_regs[] = {
1660 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
1661 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1662 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1663 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1664 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1665 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1666 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1667 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
1668 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
1669 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
1670 	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
1671 
1672 	/*
1673 	 * DC{C,I,CI}SW operations:
1674 	 */
1675 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
1676 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
1677 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
1678 
1679 	/* PMU */
1680 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
1681 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
1682 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
1683 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
1684 	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
1685 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
1686 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
1687 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
1688 	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
1689 	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
1690 	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
1691 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
1692 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
1693 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
1694 	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
1695 
1696 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
1697 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
1698 	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
1699 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
1700 
1701 	/* ICC_SRE */
1702 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
1703 
1704 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
1705 
1706 	/* CNTP_TVAL */
1707 	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
1708 	/* CNTP_CTL */
1709 	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
1710 
1711 	/* PMEVCNTRn */
1712 	PMU_PMEVCNTR(0),
1713 	PMU_PMEVCNTR(1),
1714 	PMU_PMEVCNTR(2),
1715 	PMU_PMEVCNTR(3),
1716 	PMU_PMEVCNTR(4),
1717 	PMU_PMEVCNTR(5),
1718 	PMU_PMEVCNTR(6),
1719 	PMU_PMEVCNTR(7),
1720 	PMU_PMEVCNTR(8),
1721 	PMU_PMEVCNTR(9),
1722 	PMU_PMEVCNTR(10),
1723 	PMU_PMEVCNTR(11),
1724 	PMU_PMEVCNTR(12),
1725 	PMU_PMEVCNTR(13),
1726 	PMU_PMEVCNTR(14),
1727 	PMU_PMEVCNTR(15),
1728 	PMU_PMEVCNTR(16),
1729 	PMU_PMEVCNTR(17),
1730 	PMU_PMEVCNTR(18),
1731 	PMU_PMEVCNTR(19),
1732 	PMU_PMEVCNTR(20),
1733 	PMU_PMEVCNTR(21),
1734 	PMU_PMEVCNTR(22),
1735 	PMU_PMEVCNTR(23),
1736 	PMU_PMEVCNTR(24),
1737 	PMU_PMEVCNTR(25),
1738 	PMU_PMEVCNTR(26),
1739 	PMU_PMEVCNTR(27),
1740 	PMU_PMEVCNTR(28),
1741 	PMU_PMEVCNTR(29),
1742 	PMU_PMEVCNTR(30),
1743 	/* PMEVTYPERn */
1744 	PMU_PMEVTYPER(0),
1745 	PMU_PMEVTYPER(1),
1746 	PMU_PMEVTYPER(2),
1747 	PMU_PMEVTYPER(3),
1748 	PMU_PMEVTYPER(4),
1749 	PMU_PMEVTYPER(5),
1750 	PMU_PMEVTYPER(6),
1751 	PMU_PMEVTYPER(7),
1752 	PMU_PMEVTYPER(8),
1753 	PMU_PMEVTYPER(9),
1754 	PMU_PMEVTYPER(10),
1755 	PMU_PMEVTYPER(11),
1756 	PMU_PMEVTYPER(12),
1757 	PMU_PMEVTYPER(13),
1758 	PMU_PMEVTYPER(14),
1759 	PMU_PMEVTYPER(15),
1760 	PMU_PMEVTYPER(16),
1761 	PMU_PMEVTYPER(17),
1762 	PMU_PMEVTYPER(18),
1763 	PMU_PMEVTYPER(19),
1764 	PMU_PMEVTYPER(20),
1765 	PMU_PMEVTYPER(21),
1766 	PMU_PMEVTYPER(22),
1767 	PMU_PMEVTYPER(23),
1768 	PMU_PMEVTYPER(24),
1769 	PMU_PMEVTYPER(25),
1770 	PMU_PMEVTYPER(26),
1771 	PMU_PMEVTYPER(27),
1772 	PMU_PMEVTYPER(28),
1773 	PMU_PMEVTYPER(29),
1774 	PMU_PMEVTYPER(30),
1775 	/* PMCCFILTR */
1776 	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
1777 };
1778 
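/*
 * Trapped cp15 64bit registers. A 64-bit copro access has no CRn or Op2;
 * the zero values below simply mirror what kvm_handle_cp_64() puts in the
 * search key.
 */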
1779 static const struct sys_reg_desc cp15_64_regs[] = {
1780 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1781 	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
1782 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
1783 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
1784 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
1785 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
1786 	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
1787 };
1788 
1789 /* Target specific emulation tables */
1790 static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
1791 
1792 void kvm_register_target_sys_reg_table(unsigned int target,
1793 				       struct kvm_sys_reg_target_table *table)
1794 {
1795 	target_tables[target] = table;
1796 }
1797 
1798 /* Get specific register table for this target. */
1799 static const struct sys_reg_desc *get_target_table(unsigned target,
1800 						   bool mode_is_64,
1801 						   size_t *num)
1802 {
1803 	struct kvm_sys_reg_target_table *table;
1804 
1805 	table = target_tables[target];
1806 	if (mode_is_64) {
1807 		*num = table->table64.num;
1808 		return table->table64.table;
1809 	} else {
1810 		*num = table->table32.num;
1811 		return table->table32.table;
1812 	}
1813 }
1814 
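/*
 * Pack an encoding into a single ordering key: Op0 in bits [15:14], Op1 in
 * [13:11], CRn in [10:7], CRm in [6:3] and Op2 in [2:0]. The descriptor
 * tables are sorted on this ordering (enforced by check_sysreg_table() at
 * init time), which is what makes the bsearch() in find_reg() valid.
 */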
1815 #define reg_to_match_value(x)						\
1816 	({								\
1817 		unsigned long val;					\
1818 		val  = (x)->Op0 << 14;					\
1819 		val |= (x)->Op1 << 11;					\
1820 		val |= (x)->CRn << 7;					\
1821 		val |= (x)->CRm << 3;					\
1822 		val |= (x)->Op2;					\
1823 		val;							\
1824 	 })
1825 
1826 static int match_sys_reg(const void *key, const void *elt)
1827 {
1828 	const unsigned long pval = (unsigned long)key;
1829 	const struct sys_reg_desc *r = elt;
1830 
1831 	return pval - reg_to_match_value(r);
1832 }
1833 
1834 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
1835 					 const struct sys_reg_desc table[],
1836 					 unsigned int num)
1837 {
1838 	unsigned long pval = reg_to_match_value(params);
1839 
1840 	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
1841 }
1842 
1843 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
1844 {
1845 	kvm_inject_undefined(vcpu);
1846 	return 1;
1847 }
1848 
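/*
 * perform_access -- run the accessor for a trapped register.
 *
 * The accessor returns true when the access was emulated and the guest
 * instruction should be skipped, and false when it declined to handle it
 * (typically after injecting an exception into the guest), in which case
 * the guest PC is left untouched.
 */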
1849 static void perform_access(struct kvm_vcpu *vcpu,
1850 			   struct sys_reg_params *params,
1851 			   const struct sys_reg_desc *r)
1852 {
1853 	/*
1854 	 * Not having an accessor means that we have configured a trap
1855 	 * that we don't know how to handle. This certainly qualifies
1856 	 * as a gross bug that should be fixed right away.
1857 	 */
1858 	BUG_ON(!r->access);
1859 
1860 	/* Skip the trapped instruction if the handler asks us to */
1861 	if (likely(r->access(vcpu, params, r)))
1862 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1863 }
1864 
1865 /*
1866  * emulate_cp -- tries to match a sys_reg access in a handling table, and
1867  *               calls the corresponding trap handler.
1868  *
1869  * @params: pointer to the descriptor of the access
1870  * @table: array of trap descriptors
1871  * @num: size of the trap descriptor array
1872  *
1873  * Return 0 if the access has been handled, and -1 if not.
1874  */
1875 static int emulate_cp(struct kvm_vcpu *vcpu,
1876 		      struct sys_reg_params *params,
1877 		      const struct sys_reg_desc *table,
1878 		      size_t num)
1879 {
1880 	const struct sys_reg_desc *r;
1881 
1882 	if (!table)
1883 		return -1;	/* Not handled */
1884 
1885 	r = find_reg(params, table, num);
1886 
1887 	if (r) {
1888 		perform_access(vcpu, params, r);
1889 		return 0;
1890 	}
1891 
1892 	/* Not handled */
1893 	return -1;
1894 }
1895 
1896 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1897 				struct sys_reg_params *params)
1898 {
1899 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
1900 	int cp = -1;
1901 
1902 	switch(hsr_ec) {
1903 	case ESR_ELx_EC_CP15_32:
1904 	case ESR_ELx_EC_CP15_64:
1905 		cp = 15;
1906 		break;
1907 	case ESR_ELx_EC_CP14_MR:
1908 	case ESR_ELx_EC_CP14_64:
1909 		cp = 14;
1910 		break;
1911 	default:
1912 		WARN_ON(1);
1913 	}
1914 
1915 	kvm_err("Unsupported guest CP%d access at: %08lx\n",
1916 		cp, *vcpu_pc(vcpu));
1917 	print_sys_reg_instr(params);
1918 	kvm_inject_undefined(vcpu);
1919 }
1920 
1921 /**
1922  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
1923  * @vcpu: The VCPU pointer
1924  * @global: the global trap table (@nr_global entries)
 * @target_specific: the target-specific trap table (@nr_specific entries)
1925  */
1926 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1927 			    const struct sys_reg_desc *global,
1928 			    size_t nr_global,
1929 			    const struct sys_reg_desc *target_specific,
1930 			    size_t nr_specific)
1931 {
1932 	struct sys_reg_params params;
1933 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1934 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
1935 	int Rt2 = (hsr >> 10) & 0x1f;
1936 
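	/*
	 * Decode the trap: for an MCRR/MRRC access the ESR_EL2 ISS holds
	 * Opc1 in bits [19:16], Rt2 in [14:10], Rt in [9:5], CRm in [4:1]
	 * and the direction bit in [0] (0 means the guest was writing).
	 */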
1937 	params.is_aarch32 = true;
1938 	params.is_32bit = false;
1939 	params.CRm = (hsr >> 1) & 0xf;
1940 	params.is_write = ((hsr & 1) == 0);
1941 
1942 	params.Op0 = 0;
1943 	params.Op1 = (hsr >> 16) & 0xf;
1944 	params.Op2 = 0;
1945 	params.CRn = 0;
1946 
1947 	/*
1948 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1949 	 * backends between AArch32 and AArch64, we get away with it.
1950 	 */
1951 	if (params.is_write) {
1952 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1953 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1954 	}
1955 
1956 	/*
1957 	 * Try to emulate the coprocessor access using the target
1958 	 * specific table first, and using the global table afterwards.
1959 	 * If either of the tables contains a handler, handle the
1960 	 * potential register operation in the case of a read and return
1961 	 * with success.
1962 	 */
1963 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1964 	    !emulate_cp(vcpu, &params, global, nr_global)) {
1965 		/* Split up the value between registers for the read side */
1966 		if (!params.is_write) {
1967 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1968 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1969 		}
1970 
1971 		return 1;
1972 	}
1973 
1974 	unhandled_cp_access(vcpu, &params);
1975 	return 1;
1976 }
1977 
1978 /**
1979  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
1980  * @vcpu: The VCPU pointer
1981  * @global: the global trap table (@nr_global entries)
 * @target_specific: the target-specific trap table (@nr_specific entries)
1982  */
1983 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1984 			    const struct sys_reg_desc *global,
1985 			    size_t nr_global,
1986 			    const struct sys_reg_desc *target_specific,
1987 			    size_t nr_specific)
1988 {
1989 	struct sys_reg_params params;
1990 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
1991 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
1992 
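	/*
	 * Decode the trap: for an MCR/MRC access the ESR_EL2 ISS holds
	 * Opc2 in bits [19:17], Opc1 in [16:14], CRn in [13:10], Rt in
	 * [9:5], CRm in [4:1] and the direction bit in [0].
	 */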
1993 	params.is_aarch32 = true;
1994 	params.is_32bit = true;
1995 	params.CRm = (hsr >> 1) & 0xf;
1996 	params.regval = vcpu_get_reg(vcpu, Rt);
1997 	params.is_write = ((hsr & 1) == 0);
1998 	params.CRn = (hsr >> 10) & 0xf;
1999 	params.Op0 = 0;
2000 	params.Op1 = (hsr >> 14) & 0x7;
2001 	params.Op2 = (hsr >> 17) & 0x7;
2002 
2003 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
2004 	    !emulate_cp(vcpu, &params, global, nr_global)) {
2005 		if (!params.is_write)
2006 			vcpu_set_reg(vcpu, Rt, params.regval);
2007 		return 1;
2008 	}
2009 
2010 	unhandled_cp_access(vcpu, &params);
2011 	return 1;
2012 }
2013 
2014 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2015 {
2016 	const struct sys_reg_desc *target_specific;
2017 	size_t num;
2018 
2019 	target_specific = get_target_table(vcpu->arch.target, false, &num);
2020 	return kvm_handle_cp_64(vcpu,
2021 				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
2022 				target_specific, num);
2023 }
2024 
2025 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2026 {
2027 	const struct sys_reg_desc *target_specific;
2028 	size_t num;
2029 
2030 	target_specific = get_target_table(vcpu->arch.target, false, &num);
2031 	return kvm_handle_cp_32(vcpu,
2032 				cp15_regs, ARRAY_SIZE(cp15_regs),
2033 				target_specific, num);
2034 }
2035 
2036 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
2037 {
2038 	return kvm_handle_cp_64(vcpu,
2039 				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
2040 				NULL, 0);
2041 }
2042 
2043 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
2044 {
2045 	return kvm_handle_cp_32(vcpu,
2046 				cp14_regs, ARRAY_SIZE(cp14_regs),
2047 				NULL, 0);
2048 }
2049 
2050 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
2051 			   struct sys_reg_params *params)
2052 {
2053 	size_t num;
2054 	const struct sys_reg_desc *table, *r;
2055 
2056 	table = get_target_table(vcpu->arch.target, true, &num);
2057 
2058 	/* Search target-specific then generic table. */
2059 	r = find_reg(params, table, num);
2060 	if (!r)
2061 		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2062 
2063 	if (likely(r)) {
2064 		perform_access(vcpu, params, r);
2065 	} else {
2066 		kvm_err("Unsupported guest sys_reg access at: %lx\n",
2067 			*vcpu_pc(vcpu));
2068 		print_sys_reg_instr(params);
2069 		kvm_inject_undefined(vcpu);
2070 	}
2071 	return 1;
2072 }
2073 
2074 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
2075 			      const struct sys_reg_desc *table, size_t num)
2076 {
2077 	unsigned long i;
2078 
2079 	for (i = 0; i < num; i++)
2080 		if (table[i].reset)
2081 			table[i].reset(vcpu, &table[i]);
2082 }
2083 
2084 /**
2085  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2086  * @vcpu: The VCPU pointer
2087  * @run:  The kvm_run struct
2088  */
2089 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
2090 {
2091 	struct sys_reg_params params;
2092 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
2093 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2094 	int ret;
2095 
2096 	trace_kvm_handle_sys_reg(esr);
2097 
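	/*
	 * Decode the trap: for an MSR/MRS access the ESR_EL2 ISS holds
	 * Op0 in bits [21:20], Op2 in [19:17], Op1 in [16:14], CRn in
	 * [13:10], Rt in [9:5], CRm in [4:1] and the direction bit in [0].
	 */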
2098 	params.is_aarch32 = false;
2099 	params.is_32bit = false;
2100 	params.Op0 = (esr >> 20) & 3;
2101 	params.Op1 = (esr >> 14) & 0x7;
2102 	params.CRn = (esr >> 10) & 0xf;
2103 	params.CRm = (esr >> 1) & 0xf;
2104 	params.Op2 = (esr >> 17) & 0x7;
2105 	params.regval = vcpu_get_reg(vcpu, Rt);
2106 	params.is_write = !(esr & 1);
2107 
2108 	ret = emulate_sys_reg(vcpu, &params);
2109 
2110 	if (!params.is_write)
2111 		vcpu_set_reg(vcpu, Rt, params.regval);
2112 	return ret;
2113 }
2114 
2115 /******************************************************************************
2116  * Userspace API
2117  *****************************************************************************/
2118 
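/*
 * Userspace names a system register with a 64-bit KVM_ONE_REG index that
 * embeds Op0/Op1/CRn/CRm/Op2 in the KVM_REG_ARM64_SYSREG_* fields.
 * index_to_params() unpacks such an index; any bit set outside those
 * fields (or a size other than 64 bits) makes it invalid.
 */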
2119 static bool index_to_params(u64 id, struct sys_reg_params *params)
2120 {
2121 	switch (id & KVM_REG_SIZE_MASK) {
2122 	case KVM_REG_SIZE_U64:
2123 		/* Any unused index bits mean it's not valid. */
2124 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2125 			      | KVM_REG_ARM_COPROC_MASK
2126 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
2127 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
2128 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
2129 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
2130 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
2131 			return false;
2132 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2133 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2134 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2135 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2136 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2137 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2138 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2139 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2140 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2141 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2142 		return true;
2143 	default:
2144 		return false;
2145 	}
2146 }
2147 
2148 const struct sys_reg_desc *find_reg_by_id(u64 id,
2149 					  struct sys_reg_params *params,
2150 					  const struct sys_reg_desc table[],
2151 					  unsigned int num)
2152 {
2153 	if (!index_to_params(id, params))
2154 		return NULL;
2155 
2156 	return find_reg(params, table, num);
2157 }
2158 
2159 /* Decode an index value, and find the sys_reg_desc entry. */
2160 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
2161 						    u64 id)
2162 {
2163 	size_t num;
2164 	const struct sys_reg_desc *table, *r;
2165 	struct sys_reg_params params;
2166 
2167 	/* We only do sys_reg for now. */
2168 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2169 		return NULL;
2170 
2171 	table = get_target_table(vcpu->arch.target, true, &num);
2172 	r = find_reg_by_id(id, &params, table, num);
2173 	if (!r)
2174 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2175 
2176 	/* Not saved in the sys_reg array and not otherwise accessible? */
2177 	if (r && !(r->reg || r->get_user))
2178 		r = NULL;
2179 
2180 	return r;
2181 }
2182 
2183 /*
2184  * These are the invariant sys_reg registers: we let the guest see the
2185  * host versions of these, so they're part of the guest state.
2186  *
2187  * A future CPU may provide a mechanism to present different values to
2188  * the guest, or a future kvm may trap them.
2189  */
2190 
2191 #define FUNCTION_INVARIANT(reg)						\
2192 	static void get_##reg(struct kvm_vcpu *v,			\
2193 			      const struct sys_reg_desc *r)		\
2194 	{								\
2195 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
2196 	}
2197 
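/*
 * For example, FUNCTION_INVARIANT(midr_el1) defines get_midr_el1(), which
 * snapshots the host's MIDR_EL1 into the descriptor's ->val.
 */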
2198 FUNCTION_INVARIANT(midr_el1)
2199 FUNCTION_INVARIANT(ctr_el0)
2200 FUNCTION_INVARIANT(revidr_el1)
2201 FUNCTION_INVARIANT(clidr_el1)
2202 FUNCTION_INVARIANT(aidr_el1)
2203 
2204 /* ->val is filled in by kvm_sys_reg_table_init() */
2205 static struct sys_reg_desc invariant_sys_regs[] = {
2206 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2207 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2208 	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2209 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2210 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2211 };
2212 
2213 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
2214 {
2215 	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
2216 		return -EFAULT;
2217 	return 0;
2218 }
2219 
2220 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
2221 {
2222 	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
2223 		return -EFAULT;
2224 	return 0;
2225 }
2226 
2227 static int get_invariant_sys_reg(u64 id, void __user *uaddr)
2228 {
2229 	struct sys_reg_params params;
2230 	const struct sys_reg_desc *r;
2231 
2232 	r = find_reg_by_id(id, &params, invariant_sys_regs,
2233 			   ARRAY_SIZE(invariant_sys_regs));
2234 	if (!r)
2235 		return -ENOENT;
2236 
2237 	return reg_to_user(uaddr, &r->val, id);
2238 }
2239 
2240 static int set_invariant_sys_reg(u64 id, void __user *uaddr)
2241 {
2242 	struct sys_reg_params params;
2243 	const struct sys_reg_desc *r;
2244 	int err;
2245 	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
2246 
2247 	r = find_reg_by_id(id, &params, invariant_sys_regs,
2248 			   ARRAY_SIZE(invariant_sys_regs));
2249 	if (!r)
2250 		return -ENOENT;
2251 
2252 	err = reg_from_user(&val, uaddr, id);
2253 	if (err)
2254 		return err;
2255 
2256 	/* This is what we mean by invariant: you can't change it. */
2257 	if (r->val != val)
2258 		return -EINVAL;
2259 
2260 	return 0;
2261 }
2262 
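/*
 * val is a CSSELR value: bit 0 selects between the instruction and data
 * side, bits [3:1] the cache level (0 == L1). Check the selection against
 * the CLIDR Ctype fields cached in cache_levels by kvm_sys_reg_table_init().
 */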
2263 static bool is_valid_cache(u32 val)
2264 {
2265 	u32 level, ctype;
2266 
2267 	if (val >= CSSELR_MAX)
2268 		return false;
2269 
2270 	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
2271 	level = (val >> 1);
2272 	ctype = (cache_levels >> (level * 3)) & 7;
2273 
2274 	switch (ctype) {
2275 	case 0: /* No cache */
2276 		return false;
2277 	case 1: /* Instruction cache only */
2278 		return (val & 1);
2279 	case 2: /* Data cache only */
2280 	case 4: /* Unified cache */
2281 		return !(val & 1);
2282 	case 3: /* Separate instruction and data caches */
2283 		return true;
2284 	default: /* Reserved: we can't know instruction or data. */
2285 		return false;
2286 	}
2287 }
2288 
2289 static int demux_c15_get(u64 id, void __user *uaddr)
2290 {
2291 	u32 val;
2292 	u32 __user *uval = uaddr;
2293 
2294 	/* Fail if we have unknown bits set. */
2295 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2296 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2297 		return -ENOENT;
2298 
2299 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2300 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2301 		if (KVM_REG_SIZE(id) != 4)
2302 			return -ENOENT;
2303 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2304 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2305 		if (!is_valid_cache(val))
2306 			return -ENOENT;
2307 
2308 		return put_user(get_ccsidr(val), uval);
2309 	default:
2310 		return -ENOENT;
2311 	}
2312 }
2313 
2314 static int demux_c15_set(u64 id, void __user *uaddr)
2315 {
2316 	u32 val, newval;
2317 	u32 __user *uval = uaddr;
2318 
2319 	/* Fail if we have unknown bits set. */
2320 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2321 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2322 		return -ENOENT;
2323 
2324 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2325 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2326 		if (KVM_REG_SIZE(id) != 4)
2327 			return -ENOENT;
2328 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2329 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2330 		if (!is_valid_cache(val))
2331 			return -ENOENT;
2332 
2333 		if (get_user(newval, uval))
2334 			return -EFAULT;
2335 
2336 		/* This is also invariant: you can't change it. */
2337 		if (newval != get_ccsidr(val))
2338 			return -EINVAL;
2339 		return 0;
2340 	default:
2341 		return -ENOENT;
2342 	}
2343 }
2344 
2345 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2346 {
2347 	const struct sys_reg_desc *r;
2348 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2349 
2350 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2351 		return demux_c15_get(reg->id, uaddr);
2352 
2353 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2354 		return -ENOENT;
2355 
2356 	r = index_to_sys_reg_desc(vcpu, reg->id);
2357 	if (!r)
2358 		return get_invariant_sys_reg(reg->id, uaddr);
2359 
2360 	if (r->get_user)
2361 		return (r->get_user)(vcpu, r, reg, uaddr);
2362 
2363 	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
2364 }
2365 
2366 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2367 {
2368 	const struct sys_reg_desc *r;
2369 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2370 
2371 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2372 		return demux_c15_set(reg->id, uaddr);
2373 
2374 	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
2375 		return -ENOENT;
2376 
2377 	r = index_to_sys_reg_desc(vcpu, reg->id);
2378 	if (!r)
2379 		return set_invariant_sys_reg(reg->id, uaddr);
2380 
2381 	if (r->set_user)
2382 		return (r->set_user)(vcpu, r, reg, uaddr);
2383 
2384 	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2385 }
2386 
2387 static unsigned int num_demux_regs(void)
2388 {
2389 	unsigned int i, count = 0;
2390 
2391 	for (i = 0; i < CSSELR_MAX; i++)
2392 		if (is_valid_cache(i))
2393 			count++;
2394 
2395 	return count;
2396 }
2397 
2398 static int write_demux_regids(u64 __user *uindices)
2399 {
2400 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2401 	unsigned int i;
2402 
2403 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2404 	for (i = 0; i < CSSELR_MAX; i++) {
2405 		if (!is_valid_cache(i))
2406 			continue;
2407 		if (put_user(val | i, uindices))
2408 			return -EFAULT;
2409 		uindices++;
2410 	}
2411 	return 0;
2412 }
2413 
2414 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2415 {
2416 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2417 		KVM_REG_ARM64_SYSREG |
2418 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2419 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2420 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2421 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2422 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2423 }
2424 
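/*
 * A NULL *uind means the caller is only counting registers, not copying
 * indices out to userspace (see kvm_arm_num_sys_reg_descs()).
 */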
2425 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2426 {
2427 	if (!*uind)
2428 		return true;
2429 
2430 	if (put_user(sys_reg_to_index(reg), *uind))
2431 		return false;
2432 
2433 	(*uind)++;
2434 	return true;
2435 }
2436 
2437 static int walk_one_sys_reg(const struct sys_reg_desc *rd,
2438 			    u64 __user **uind,
2439 			    unsigned int *total)
2440 {
2441 	/*
2442 	 * Ignore registers we trap but don't save,
2443 	 * and for which no custom user accessor is provided.
2444 	 */
2445 	if (!(rd->reg || rd->get_user))
2446 		return 0;
2447 
2448 	if (!copy_reg_to_user(rd, uind))
2449 		return -EFAULT;
2450 
2451 	(*total)++;
2452 	return 0;
2453 }
2454 
2455 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
2456 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2457 {
2458 	const struct sys_reg_desc *i1, *i2, *end1, *end2;
2459 	unsigned int total = 0;
2460 	size_t num;
2461 	int err;
2462 
2463 	/* We check for duplicates here, to allow arch-specific overrides. */
2464 	i1 = get_target_table(vcpu->arch.target, true, &num);
2465 	end1 = i1 + num;
2466 	i2 = sys_reg_descs;
2467 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2468 
2469 	BUG_ON(i1 == end1 || i2 == end2);
2470 
2471 	/* Walk carefully, as both tables may refer to the same register. */
2472 	while (i1 || i2) {
2473 		int cmp = cmp_sys_reg(i1, i2);
2474 		/* target-specific overrides generic entry; on a match both advance below. */
2475 		if (cmp <= 0)
2476 			err = walk_one_sys_reg(i1, &uind, &total);
2477 		else
2478 			err = walk_one_sys_reg(i2, &uind, &total);
2479 
2480 		if (err)
2481 			return err;
2482 
2483 		if (cmp <= 0 && ++i1 == end1)
2484 			i1 = NULL;
2485 		if (cmp >= 0 && ++i2 == end2)
2486 			i2 = NULL;
2487 	}
2488 	return total;
2489 }
2490 
2491 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2492 {
2493 	return ARRAY_SIZE(invariant_sys_regs)
2494 		+ num_demux_regs()
2495 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
2496 }
2497 
2498 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2499 {
2500 	unsigned int i;
2501 	int err;
2502 
2503 	/* Then give them all the invariant registers' indices. */
2504 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2505 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2506 			return -EFAULT;
2507 		uindices++;
2508 	}
2509 
2510 	err = walk_sys_regs(vcpu, uindices);
2511 	if (err < 0)
2512 		return err;
2513 	uindices += err;
2514 
2515 	return write_demux_regids(uindices);
2516 }
2517 
2518 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
2519 {
2520 	unsigned int i;
2521 
2522 	for (i = 1; i < n; i++) {
2523 		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2524 			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
2525 			return 1;
2526 		}
2527 	}
2528 
2529 	return 0;
2530 }
2531 
2532 void kvm_sys_reg_table_init(void)
2533 {
2534 	unsigned int i;
2535 	struct sys_reg_desc clidr;
2536 
2537 	/* Make sure tables are unique and in order. */
2538 	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
2539 	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
2540 	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
2541 	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
2542 	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
2543 	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
2544 
2545 	/* We abuse the reset function to overwrite the table itself. */
2546 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2547 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2548 
2549 	/*
2550 	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
2551 	 *
2552 	 *   If software reads the Cache Type fields from Ctype1
2553 	 *   upwards, once it has seen a value of 0b000, no caches
2554 	 *   exist at further-out levels of the hierarchy. So, for
2555 	 *   example, if Ctype3 is the first Cache Type field with a
2556 	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
2557 	 *   ignored.
2558 	 */
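	/*
	 * For example, if Ctype1 and Ctype2 are non-zero but Ctype3 is 0,
	 * the loop below stops at i == 2 and the mask keeps only bits
	 * [5:0] of cache_levels, i.e. the first two Cache Type fields.
	 */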
2559 	get_clidr_el1(NULL, &clidr); /* Ugly... */
2560 	cache_levels = clidr.val;
2561 	for (i = 0; i < 7; i++)
2562 		if (((cache_levels >> (i*3)) & 7) == 0)
2563 			break;
2564 	/* Clear all higher bits. */
2565 	cache_levels &= (1 << (i*3))-1;
2566 }
2567 
2568 /**
2569  * kvm_reset_sys_regs - sets system registers to reset value
2570  * @vcpu: The VCPU pointer
2571  *
2572  * This function finds the right table above and sets the registers on the
2573  * virtual CPU struct to their architecturally defined reset values.
2574  */
2575 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2576 {
2577 	size_t num;
2578 	const struct sys_reg_desc *table;
2579 
2580 	/* Catch someone adding a register without putting in a reset entry. */
2581 	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
2582 
2583 	/* Generic chip reset first (so target could override). */
2584 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2585 
2586 	table = get_target_table(vcpu->arch.target, true, &num);
2587 	reset_sys_reg_descs(vcpu, table, num);
2588 
2589 	for (num = 1; num < NR_SYS_REGS; num++)
2590 		if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
2591 			panic("Didn't reset __vcpu_sys_reg(%zi)", num);
2592 }
2593