// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

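/*
 * Let the assembler accept Virtualization Extensions instructions
 * (ERET, HVC, MRS/MSR to the hyp banked registers) that the inline
 * asm helpers used in this file rely on.
 */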
__asm__(".arch_extension     virt");

/*
 * Activate the traps, saving the host's fpexc register before
 * overwriting it. We'll restore it on VM exit.
 */
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
{
	u32 val;

	/*
	 * We are about to set HCPTR.TCP10/11 to trap all floating point
	 * register accesses to HYP, however, the ARM ARM clearly states that
	 * traps are only taken to HYP if the operation would not otherwise
	 * trap to SVC.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
	 */
	val = read_sysreg(VFP_FPEXC);
	*fpexc_host = val;
	if (!(val & FPEXC_EN)) {
		write_sysreg(val | FPEXC_EN, VFP_FPEXC);
		isb();
	}

	write_sysreg(vcpu->arch.hcr, HCR);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(HSTR_T(15), HSTR);
	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
	val = read_sysreg(HDCR);
	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
	write_sysreg(val, HDCR);
}

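/*
 * Undo __activate_traps(): clear the hyp trap configuration so the host
 * runs untrapped again. A pending virtual abort is preserved in
 * vcpu->arch.hcr (see below) rather than lost when HCR is cleared.
 */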
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u32 val;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See B1.9.9 (Virtual Abort exception) for details,
	 * but the crucial bit is the zeroing of HCR.VA in the
	 * pseudocode.
	 */
	if (vcpu->arch.hcr & HCR_VA)
		vcpu->arch.hcr = read_sysreg(HCR);

	write_sysreg(0, HCR);
	write_sysreg(0, HSTR);
	val = read_sysreg(HDCR);
	write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
	write_sysreg(0, HCPTR);
}

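/* Point stage-2 translation at this guest and expose its MIDR via VPIDR. */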
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	write_sysreg(kvm_get_vttbr(kvm), VTTBR);
	write_sysreg(vcpu->arch.midr, VPIDR);
}

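/* Disable stage-2 translation and restore the host's own MIDR in VPIDR. */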
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, VTTBR);
	write_sysreg(read_sysreg(MIDR), VPIDR);
}

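/*
 * Only the GICv3 CPU interface has state to save/restore and traps to
 * manage from HYP mode; with a GICv2 there is nothing to do in these
 * two helpers.
 */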
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

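/*
 * Decode the exception syndrome and record the faulting addresses for
 * the host to handle. Returns false if the fault information could not
 * be resolved and the guest should simply be re-entered.
 */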
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u32 hsr = read_sysreg(HSR);
	u8 ec = hsr >> HSR_EC_SHIFT;
	u32 hpfar, far;

	vcpu->arch.fault.hsr = hsr;

	if (ec == HSR_EC_IABT)
		far = read_sysreg(HIFAR);
	else if (ec == HSR_EC_DABT)
		far = read_sysreg(HDFAR);
	else
		return true;

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */
	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
		u64 par, tmp;

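		/*
		 * HPFAR is UNKNOWN for this kind of fault: redo the stage-1
		 * walk ourselves with an ATS1CPR translation (preserving the
		 * guest's PAR across it), then rebuild the HPFAR value from
		 * the resulting PAR[39:12].
		 */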
		par = read_sysreg(PAR);
		write_sysreg(far, ATS1CPR);
		isb();

		tmp = read_sysreg(PAR);
		write_sysreg(par, PAR);

		if (unlikely(tmp & 1))
			return false; /* Translation failed, back to guest */

		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
	} else {
		hpfar = read_sysreg(HPFAR);
	}

	vcpu->arch.fault.hxfar = far;
	vcpu->arch.fault.hpfar = hpfar;
	return true;
}

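/*
 * The world switch proper: save host state, install the guest's traps,
 * stage-2 MMU, GIC, timer and register state, run the guest, and then
 * undo it all in reverse order. Returns the exit code for the host.
 */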
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;
	u32 fpexc;

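	/*
	 * The vcpu pointer is passed as a kernel VA: convert it to a HYP VA
	 * and stash it in HTPIDR so that the panic path can find it again.
	 */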
	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, HTPIDR);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state(host_ctxt);
	__banked_save_state(host_ctxt);

	__activate_traps(vcpu, &fpexc);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__sysreg_restore_state(guest_ctxt);
	__banked_restore_state(guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

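	/*
	 * If we took a guest abort but could not resolve the faulting IPA,
	 * the guest's stage-1 translation changed under our feet: go straight
	 * back into the guest and let it retry the access.
	 */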
	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
		goto again;

	fp_enabled = __vfp_enabled();

	__banked_save_state(guest_ctxt);
	__sysreg_save_state(guest_ctxt);
	__timer_disable_traps(vcpu);

	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__banked_restore_state(host_ctxt);
	__sysreg_restore_state(host_ctxt);

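	/*
	 * VFP state is switched lazily: only swap it if the guest was
	 * actually given access to the FP unit while it ran.
	 */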
	if (fp_enabled) {
		__vfp_save_state(&guest_ctxt->vfp);
		__vfp_restore_state(&host_ctxt->vfp);
	}

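	/* Restore the host FPEXC value saved by __activate_traps(). */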
	write_sysreg(fpexc, VFP_FPEXC);

	return exit_code;
}

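/* Panic format strings, indexed by the exception code passed to __hyp_panic(). */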
static const char * const __hyp_panic_string[] = {
	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_SOFTWARE]   = "\nHYP panic: SVC   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
	[ARM_EXCEPTION_IRQ]        = "\nHYP panic: IRQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_FIQ]        = "\nHYP panic: FIQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_HVC]        = "\nHYP panic: HVC   PC:%08x CPSR:%08x",
};

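/*
 * Fatal exception taken in HYP mode: if a guest was running (VTTBR is
 * non-zero), restore enough host state for the panic message and
 * backtrace to be meaningful before calling into the real panic code.
 */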
void __hyp_text __noreturn __hyp_panic(int cause)
{
	u32 elr = read_special(ELR_hyp);
	u32 val;

	if (cause == ARM_EXCEPTION_DATA_ABORT)
		val = read_sysreg(HDFAR);
	else
		val = read_special(SPSR);

	if (read_sysreg(VTTBR)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__banked_restore_state(host_ctxt);
		__sysreg_restore_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_do_panic(__hyp_panic_string[cause], elr, val);

	unreachable();
}