// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

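/* Undo the EL1/EL0 trap configuration installed by __activate_traps_common() */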
static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

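/*
 * Set up the CPACR_EL1 trap controls (trace, FP/SIMD, SVE) and the hyp
 * vectors for guest entry on a VHE system.
 */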
static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_ZEN;
	if (update_fp_enabled(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	} else {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);

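/* Set up the CPTR_EL2 trap controls for guest entry on a non-VHE system */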
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
}

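/*
 * Program HCR_EL2 (and VSESR_EL2 when a virtual SError is pending), then
 * apply the VHE or non-VHE specific trap configuration.
 */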
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

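/* Restore the host's HCR_EL2/CPACR_EL1 settings and exception vectors (VHE) */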
static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM erratum 1165522 requires the actual execution of the above
	 * before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

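/* Restore the host's MDCR_EL2, HCR_EL2 and CPTR_EL2 settings (non-VHE) */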
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

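/*
 * On VHE, the common trap state can be switched on vcpu load/put instead of
 * on every guest entry/exit.
 */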
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

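/* Restore the host's MDCR_EL2 and undo the common trap state on vcpu put (VHE) */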
void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

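/* Install the guest's stage 2 translation */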
static void __hyp_text __activate_vm(struct kvm *kvm)
{
	__load_guest_stage2(kvm);
}

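/* Tear down the stage 2 translation by clearing VTTBR_EL2 */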
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

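/*
 * Translate the faulting guest virtual address to an IPA using AT S1E1R and
 * return it in HPFAR_EL2 format.
 */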
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries erratum 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 hsr_ec;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
	    hsr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest)
		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
			return false;

	/* Valid trap.  Switch the context: */

	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

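/*
 * Handle EL1 virtual-memory register writes trapped by the Cavium ThunderX2
 * erratum 219 (TVM) workaround by writing the value straight to the EL1
 * register.
 */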
static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		return true;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		return true;

	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}

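/*
 * True when the guest has asked for the SSBD (Spectre-v4) mitigation to be
 * disabled.
 */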
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/*
 * Disable host events, enable guest events
 */
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs above.  We must now call __activate_vm
	 * before __activate_traps, because __activate_vm configures
	 * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
	 * (among other things).
	 */
	__activate_vm(vcpu->kvm);
	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		dsb(sy);
	}

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	__activate_vm(kern_hyp_va(vcpu->kvm));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

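/*
 * Restore enough host state to be able to panic safely when hyp mode dies on
 * a non-VHE system.
 */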
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
		       read_sysreg(hpfar_el2), par, vcpu);
}

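/* On VHE, restore the host state and panic directly in the host context */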
static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;

	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

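/*
 * Hyp panic entry point: snapshot the fault state and hand off to the VHE or
 * non-VHE panic helper.
 */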
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}