/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

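/*
 * The HCR_EL2.RW bit selects the register width of the guest's EL1:
 * RW == 1 gives the guest an AArch64 EL1, RW == 0 an AArch32 one. Most
 * of the 32-bit-specific handling below keys off this bit.
 */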
static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

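/*
 * HCR_EL2.TWE traps guest WFE instructions to EL2. KVM toggles it so
 * that a waiting vcpu can yield the physical CPU when other tasks are
 * runnable, instead of spinning inside the guest.
 */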
static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

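/*
 * With VHE the guest's EL1 system registers can be resident in hardware
 * while the vcpu is loaded; sysregs_loaded_on_cpu tracks this, and the
 * accessors below go to the live register instead of the in-memory copy
 * when it is set.
 */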
static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}
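
/*
 * A minimal usage sketch (not part of this header): after emulating a
 * trapped instruction, a handler advances the guest PC using the
 * instruction length (IL) bit reported in ESR_EL2, e.g.
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *
 * so that a 16-bit Thumb instruction is skipped by 2 bytes rather than 4.
 */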

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
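
/*
 * Register number 31 encodes the zero register (XZR/WZR): reads return 0
 * and writes are discarded, matching the architecture. A trapped
 * "str wzr, [x0]", for example, reports Rt == 31 in the syndrome, and
 * vcpu_get_reg(vcpu, 31) correctly yields 0 for the data being stored.
 */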

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

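/*
 * HPFAR_EL2 reports bits [47:12] of the faulting IPA, shifted down by 12
 * and stored from bit 4 upwards; masking off the low nibble and shifting
 * left by 8 therefore reconstructs the page-aligned IPA.
 */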
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
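
/*
 * A minimal decode sketch (illustrative only): an MMIO emulation path
 * combines the accessors above along these lines, provided the abort
 * carries a valid syndrome (ISV set):
 *
 *	if (kvm_vcpu_dabt_isvalid(vcpu)) {
 *		bool is_write = kvm_vcpu_dabt_iswrite(vcpu);
 *		unsigned int len = kvm_vcpu_dabt_get_as(vcpu); // 1/2/4/8 bytes
 *		int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *		unsigned long data = is_write ? vcpu_get_reg(vcpu, rt) : 0;
 *		...
 *	}
 */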

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

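/*
 * True when the abort was a synchronous external abort or a synchronous
 * parity/ECC error, whether taken on the access itself (FSC_SEA/FSC_SECC)
 * or on any level of the translation table walk (the TTWn variants).
 */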
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);	/* SCTLR_EL1.EE: big-endian data at EL1 */
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); /* SCTLR_EL1.EE */
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}
}
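
/*
 * A minimal usage sketch (illustrative only): completing an emulated MMIO
 * load could look roughly like this, with mmio_read_result standing in
 * for whatever value the device model produced:
 *
 *	unsigned long data = vcpu_data_host_to_guest(vcpu, mmio_read_result,
 *						     len);
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *
 * The conversion matters because the guest's data endianness (CPSR.E on
 * AArch32, SCTLR_EL1.EE on AArch64) may differ from the host's.
 */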

#endif /* __ARM64_KVM_EMULATE_H__ */