/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
17 
18 #include <linux/compiler.h>
19 #include <linux/kvm_host.h>
20 
21 #include <asm/kvm_asm.h>
22 #include <asm/kvm_emulate.h>
23 #include <asm/kvm_hyp.h>
24 
/*
 * Non-VHE: Both host and guest must save everything.
 *
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
 * which are handled as part of the el2 return state) on every switch.
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU.  EL1 registers only need to be
 * switched when potentially going to run a different VCPU.  The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */
35 
/*
 * Save the registers that must be context-switched on every single
 * guest entry/exit, on both VHE and non-VHE (see the policy comment at
 * the top of this file): mdscr_el1 and sp_el0.
 */
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);

	/*
	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
	 * therefore be saved/restored on every entry/exit to/from the guest.
	 */
	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
}
46 
/*
 * Save the EL0 thread-pointer registers into @ctxt.  On VHE these only
 * need switching when returning to host userspace or when changing VCPU
 * (driven from kvm_vcpu_load_sysregs/kvm_vcpu_put_sysregs below).
 */
static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
}
52 
/*
 * Save the guest-visible EL1 system register state, plus SP_EL1,
 * ELR_EL1 and SPSR_EL1, into @ctxt.
 */
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	/* The guest's view of MPIDR_EL1 is supplied by VMPIDR_EL2. */
	ctxt->sys_regs[MPIDR_EL1]	= read_sysreg(vmpidr_el2);
	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg_el1(sctlr);
	ctxt->sys_regs[ACTLR_EL1]	= read_sysreg(actlr_el1);
	ctxt->sys_regs[CPACR_EL1]	= read_sysreg_el1(cpacr);
	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg_el1(ttbr0);
	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg_el1(ttbr1);
	ctxt->sys_regs[TCR_EL1]		= read_sysreg_el1(tcr);
	ctxt->sys_regs[ESR_EL1]		= read_sysreg_el1(esr);
	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg_el1(afsr0);
	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg_el1(afsr1);
	ctxt->sys_regs[FAR_EL1]		= read_sysreg_el1(far);
	ctxt->sys_regs[MAIR_EL1]	= read_sysreg_el1(mair);
	ctxt->sys_regs[VBAR_EL1]	= read_sysreg_el1(vbar);
	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg_el1(contextidr);
	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(amair);
	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(cntkctl);
	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);

	/* Exception-return state of the guest's EL1. */
	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
	ctxt->gp_regs.elr_el1		= read_sysreg_el1(elr);
	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
}
79 
/*
 * Save the EL2 exception-return state: after an exit the guest's PC and
 * pstate are held in ELR_EL2/SPSR_EL2.  With the RAS extension, also
 * snapshot the virtual deferred interrupt status (VDISR_EL2) as the
 * guest's DISR_EL1.
 */
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->gp_regs.regs.pc		= read_sysreg_el2(elr);
	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(spsr);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
88 
/*
 * Non-VHE world switch: save everything (EL1, common, user, and EL2
 * return state) on every switch.
 */
void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_el1_state(ctxt);
	__sysreg_save_common_state(ctxt);
	__sysreg_save_user_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
96 
/*
 * VHE world switch, host side: only the common state (mdscr_el1/sp_el0)
 * must be saved per switch; the rest is deferred to vcpu_load/vcpu_put.
 */
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
}
101 
/*
 * VHE world switch, guest side: save the common state plus the guest's
 * PC/pstate (EL2 return state); the EL1/user state stays loaded until
 * kvm_vcpu_put_sysregs().
 */
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
107 
/*
 * Restore counterpart of __sysreg_save_common_state(): reload mdscr_el1
 * and sp_el0 from @ctxt.
 */
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);

	/*
	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
	 * therefore be saved/restored on every entry/exit to/from the guest.
	 */
	write_sysreg(ctxt->gp_regs.regs.sp,	  sp_el0);
}
118 
/* Restore counterpart of __sysreg_save_user_state(): EL0 thread pointers. */
static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[TPIDR_EL0],		tpidr_el0);
	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0],	tpidrro_el0);
}
124 
/*
 * Restore counterpart of __sysreg_save_el1_state(): reload the
 * guest-visible EL1 system registers, SP_EL1, ELR_EL1 and SPSR_EL1
 * from @ctxt.
 */
static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	/* The guest's MPIDR_EL1 is presented through VMPIDR_EL2. */
	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);
	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	sctlr);
	write_sysreg(ctxt->sys_regs[ACTLR_EL1],		actlr_el1);
	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],	cpacr);
	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],	ttbr0);
	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],	ttbr1);
	write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	tcr);
	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],	esr);
	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],	afsr0);
	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],	afsr1);
	write_sysreg_el1(ctxt->sys_regs[FAR_EL1],	far);
	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],	mair);
	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],	vbar);
	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr);
	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	amair);
	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1],	cntkctl);
	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);

	/* Exception-return state of the guest's EL1. */
	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
	write_sysreg_el1(ctxt->gp_regs.elr_el1,		elr);
	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
}
151 
/*
 * Restore counterpart of __sysreg_save_el2_return_state(): install the
 * guest's PC/pstate into ELR_EL2/SPSR_EL2 for the next exception return,
 * and, with the RAS extension, its DISR_EL1 view via VDISR_EL2.
 */
static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg_el2(ctxt->gp_regs.regs.pc,		elr);
	write_sysreg_el2(ctxt->gp_regs.regs.pstate,	spsr);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
161 
/*
 * Non-VHE world switch: restore everything (EL1, common, user, and EL2
 * return state) on every switch; mirrors __sysreg_save_state_nvhe().
 */
void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_el1_state(ctxt);
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_user_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
169 
/*
 * VHE world switch, host side: only the common state needs reloading per
 * switch; mirrors sysreg_save_host_state_vhe().
 */
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
}
174 
/*
 * VHE world switch, guest side: reload the common state and the guest's
 * PC/pstate; the EL1/user state was already loaded by
 * kvm_vcpu_load_sysregs().
 */
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
180 
/*
 * Save the AArch32-specific system register state (banked SPSRs, DACR32,
 * IFSR32, and conditionally DBGVCR32) of a 32-bit EL1 guest.  Does
 * nothing for a 64-bit guest.
 */
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
	spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
	spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
	spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);

	sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
	sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);

	/* Only touch the debug register on VHE or when debug state is dirty. */
	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}
202 
/*
 * Restore counterpart of __sysreg32_save_state(): reload the AArch32
 * banked SPSRs, DACR32, IFSR32 and conditionally DBGVCR32 for a 32-bit
 * EL1 guest.  Does nothing for a 64-bit guest.
 */
void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
	write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
	write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

	write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
	write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

	/* Only touch the debug register on VHE or when debug state is dirty. */
	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}
224 
225 /**
226  * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
227  *
228  * @vcpu: The VCPU pointer
229  *
230  * Load system registers that do not affect the host's execution, for
231  * example EL1 system registers on a VHE system where the host kernel
232  * runs at EL2.  This function is called from KVM's vcpu_load() function
233  * and loading system register state early avoids having to load them on
234  * every entry to the VM.
235  */
kvm_vcpu_load_sysregs(struct kvm_vcpu * vcpu)236 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
237 {
238 	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
239 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
240 
241 	if (!has_vhe())
242 		return;
243 
244 	__sysreg_save_user_state(host_ctxt);
245 
246 	/*
247 	 * Load guest EL1 and user state
248 	 *
249 	 * We must restore the 32-bit state before the sysregs, thanks
250 	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
251 	 */
252 	__sysreg32_restore_state(vcpu);
253 	__sysreg_restore_user_state(guest_ctxt);
254 	__sysreg_restore_el1_state(guest_ctxt);
255 
256 	vcpu->arch.sysregs_loaded_on_cpu = true;
257 
258 	activate_traps_vhe_load(vcpu);
259 }
260 
261 /**
262  * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
263  *
264  * @vcpu: The VCPU pointer
265  *
266  * Save guest system registers that do not affect the host's execution, for
267  * example EL1 system registers on a VHE system where the host kernel
268  * runs at EL2.  This function is called from KVM's vcpu_put() function
269  * and deferring saving system register state until we're no longer running the
270  * VCPU avoids having to save them on every exit from the VM.
271  */
kvm_vcpu_put_sysregs(struct kvm_vcpu * vcpu)272 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
273 {
274 	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
275 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
276 
277 	if (!has_vhe())
278 		return;
279 
280 	deactivate_traps_vhe_put();
281 
282 	__sysreg_save_el1_state(guest_ctxt);
283 	__sysreg_save_user_state(guest_ctxt);
284 	__sysreg32_save_state(vcpu);
285 
286 	/* Restore host user state */
287 	__sysreg_restore_user_state(host_ctxt);
288 
289 	vcpu->arch.sysregs_loaded_on_cpu = false;
290 }
291