/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

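/*
 * Let the assembler accept Virtualization Extensions instructions,
 * such as the ELR_hyp/SPSR accesses further down in this file.
 */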
__asm__(".arch_extension     virt");

/*
 * Activate the traps, saving the host's fpexc register before
 * overwriting it. We'll restore it on VM exit.
 */
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
{
	u32 val;

	/*
	 * We are about to set HCPTR.TCP10/11 to trap all floating point
	 * register accesses to HYP; however, the ARM ARM clearly states that
	 * traps are only taken to HYP if the operation would not otherwise
	 * trap to SVC.  Therefore, when setting the TCP bits for a 32-bit
	 * guest, always make sure FPEXC.EN is set first, so that the access
	 * cannot trap to SVC instead.
	 */
	val = read_sysreg(VFP_FPEXC);
	*fpexc_host = val;
	if (!(val & FPEXC_EN)) {
		write_sysreg(val | FPEXC_EN, VFP_FPEXC);
		isb();
	}

	write_sysreg(vcpu->arch.hcr, HCR);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(HSTR_T(15), HSTR);
	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
	val = read_sysreg(HDCR);
	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
	write_sysreg(val, HDCR);
}

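/*
 * Undo __activate_traps(): drop the trap configuration installed on
 * guest entry, preserving a virtual abort that is still pending.
 */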
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u32 val;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See B1.9.9 (Virtual Abort exception) for details,
	 * but the crucial bit is the zeroing of HCR.VA in the
	 * pseudocode.
	 */
	if (vcpu->arch.hcr & HCR_VA)
		vcpu->arch.hcr = read_sysreg(HCR);

	write_sysreg(0, HCR);
	write_sysreg(0, HSTR);
	val = read_sysreg(HDCR);
	write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
	write_sysreg(0, HCPTR);
}

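/* Install the guest's stage 2 translation and its virtual MIDR. */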
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, VTTBR);
	write_sysreg(vcpu->arch.midr, VPIDR);
}

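/* Disable stage 2 translation and expose the real MIDR again. */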
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, VTTBR);
	write_sysreg(read_sysreg(MIDR), VPIDR);
}


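/*
 * Save the GICv3 CPU interface state and disable its traps before
 * handing the CPU back to the host. Nothing to do here for GICv2.
 */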
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

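/*
 * Enable the GICv3 CPU interface traps and restore its state before
 * entering the guest. Nothing to do here for GICv2.
 */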
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

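/*
 * Capture the fault state (HSR, faulting VA and IPA) for a guest abort
 * so that the host can handle it later. Returns false if the guest
 * should simply re-execute the faulting access instead.
 */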
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u32 hsr = read_sysreg(HSR);
	u8 ec = hsr >> HSR_EC_SHIFT;
	u32 hpfar, far;

	vcpu->arch.fault.hsr = hsr;

	if (ec == HSR_EC_IABT)
		far = read_sysreg(HIFAR);
	else if (ec == HSR_EC_DABT)
		far = read_sysreg(HDFAR);
	else
		return true;

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */
	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
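		/*
		 * HPFAR is UNKNOWN in this case: resolve the IPA by hand
		 * with an ATS1CPR translation of the faulting VA, saving
		 * and restoring the host's PAR around it.
		 */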
		u64 par, tmp;

		par = read_sysreg(PAR);
		write_sysreg(far, ATS1CPR);
		isb();

		tmp = read_sysreg(PAR);
		write_sysreg(par, PAR);

		if (unlikely(tmp & 1))
			return false; /* Translation failed, back to guest */

		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
	} else {
		hpfar = read_sysreg(HPFAR);
	}

	vcpu->arch.fault.hxfar = far;
	vcpu->arch.fault.hpfar = hpfar;
	return true;
}

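/*
 * The world switch: save the host state, set up the traps and the
 * guest's stage 2 context, run the guest, and restore the host state
 * on exit. Returns the exit code to the host run loop.
 */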
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;
	u32 fpexc;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, HTPIDR);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state(host_ctxt);
	__banked_save_state(host_ctxt);

	__activate_traps(vcpu, &fpexc);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__sysreg_restore_state(guest_ctxt);
	__banked_restore_state(guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

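	/*
	 * If we trapped on a guest abort but could not snapshot the fault
	 * information, the faulting translation has gone away under our
	 * feet; go straight back into the guest and retry the access.
	 */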
	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
		goto again;

	fp_enabled = __vfp_enabled();

	__banked_save_state(guest_ctxt);
	__sysreg_save_state(guest_ctxt);
	__timer_disable_traps(vcpu);

	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__banked_restore_state(host_ctxt);
	__sysreg_restore_state(host_ctxt);

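	/*
	 * If the guest made use of the VFP registers, the live state
	 * belongs to it: save it away and restore the host's VFP state.
	 */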
	if (fp_enabled) {
		__vfp_save_state(&guest_ctxt->vfp);
		__vfp_restore_state(&host_ctxt->vfp);
	}

	write_sysreg(fpexc, VFP_FPEXC);

	return exit_code;
}

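/* Panic message formats, indexed by the exception that got us here. */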
static const char * const __hyp_panic_string[] = {
	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_SOFTWARE]   = "\nHYP panic: SVC   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
	[ARM_EXCEPTION_IRQ]        = "\nHYP panic: IRQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_FIQ]        = "\nHYP panic: FIQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_HVC]        = "\nHYP panic: HVC   PC:%08x CPSR:%08x",
};

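/*
 * An unexpected exception was taken to HYP mode. If a guest was
 * running, restore the host context before printing the panic message
 * for the offending vector.
 */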
void __hyp_text __noreturn __hyp_panic(int cause)
{
	u32 elr = read_special(ELR_hyp);
	u32 val;

	if (cause == ARM_EXCEPTION_DATA_ABORT)
		val = read_sysreg(HDFAR);
	else
		val = read_special(SPSR);

	if (read_sysreg(VTTBR)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__banked_restore_state(host_ctxt);
		__sysreg_restore_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_do_panic(__hyp_panic_string[cause], elr, val);

	unreachable();
}