/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

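/*
 * Core register IDs carry an index into struct kvm_regs in their low
 * bits; masking off the architecture, size and coprocessor fields
 * recovers that index.
 */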
static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

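/*
 * Check that a userspace-supplied register ID names an actual core
 * register and that the requested access size and alignment match that
 * register, rejecting misaligned or wrongly-sized accesses.
 */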
static int validate_core_offset(const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (KVM_REG_SIZE(reg->id) == size &&
	    IS_ALIGNED(off, size / sizeof(__u32)))
		return 0;

	return -EINVAL;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

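/*
 * Write a core register from userspace. Writes to pstate are checked
 * so that userspace cannot select an exception mode inconsistent with
 * the vcpu's 32-bit or 64-bit EL1 configuration.
 */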
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;

		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(__u32);
}

/*
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

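/*
 * Emit the three timer register indices to the userspace buffer; the
 * count here must stay in sync with NUM_TIMER_REGS above.
 */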
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
		+ kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append firmware, timer and
 * system registers.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
	int ret;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

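/*
 * A worked example (illustrative only): following the construction in
 * kvm_arm_copy_reg_indices() above, the ID userspace would pass to
 * access the core register X2 is
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *	KVM_REG_ARM_CORE_REG(regs.regs[2])
 */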
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_get_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_set_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

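/*
 * Report whether a virtual SError is pending for the vcpu (HCR_EL2.VSE)
 * and, when the host has the RAS extension, the ESR it will carry.
 */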
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	return 0;
}

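/*
 * Map the host CPU's MIDR onto a KVM target type, falling back to the
 * generic v8 target for unrecognised parts.
 */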
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |	 \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
			vcpu->arch.external_debug_state = dbg->arch;

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

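/*
 * Route vcpu device attribute operations by group to the PMU or timer
 * handlers; unknown groups fail with -ENXIO.
 */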
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}