// SPDX-License-Identifier: GPL-2.0-only
/*
 * svm_vmcall_test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * Nested SVM testing: VMCALL
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"

#define VCPU_ID 5

static struct kvm_vm *vm;

static void l2_guest_code(struct svm_test_data *svm)
{
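	/* VMCALL from L2; L1 expects to observe it as a VMMCALL #VMEXIT. */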
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct svm_test_data *svm)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	/* Prepare for L2 execution. */
	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

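	/* Launch L2; run_guest() returns on the first #VMEXIT back to L1. */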
	run_guest(vmcb, svm->vmcb_gpa);

	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t svm_gva;

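	/* The test requires nested SVM; bail out early if it is unavailable. */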
	nested_svm_check_supported();

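	/* Create a VM with a single vCPU that will run l1_guest_code. */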
	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

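	/* Allocate the nested SVM state in guest memory; its GVA is L1's only argument. */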
	vcpu_alloc_svm(vm, &svm_gva);
	vcpu_args_set(vm, VCPU_ID, 1, svm_gva);

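	/* Run the vCPU until the guest reports completion or failure via ucall. */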
	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s", (const char *)uc.args[0]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
		}
	}
done:
	kvm_vm_free(vm);
	return 0;
}