// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_apic_access_test
 *
 * Copyright (C) 2020, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * The first subtest simply checks to see that an L2 guest can be
 * launched with a valid APIC-access address that is backed by a
 * page of L1 physical memory.
 *
 * The second subtest sets the APIC-access address to a (valid) L1
 * physical address that is not backed by memory. KVM can't handle
 * this situation, so resuming L2 should result in a KVM exit for
 * internal error (emulation). This is not an architectural
 * requirement. It is just a shortcoming of KVM. The internal error
 * is unfortunate, but it's better than what used to happen!
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#define VCPU_ID	0

/* The virtual machine object. */
static struct kvm_vm *vm;

static void l2_guest_code(void)
{
	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
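	/*
	 * Enable "virtualize APIC accesses" in the secondary controls and
	 * point the APIC-access address at a page backed by L1 memory.
	 */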
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
	control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
	vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);

	/* Try to launch L2 with the memory-backed APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	vmwrite(APIC_ACCESS_ADDR, high_gpa);

	/* Try to resume L2 with the unbacked APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	unsigned long apic_access_addr = ~0ul;
	unsigned int paddr_width;
	unsigned int vaddr_width;
	vm_vaddr_t vmx_pages_gva;
	unsigned long high_gpa;
	struct vmx_pages *vmx;
	bool done = false;

	nested_vmx_check_supported();

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

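	/*
	 * Use the highest page in the guest physical address space as the
	 * unbacked APIC-access address; skip the test if the VM's default
	 * memory already reaches that page.
	 */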
	kvm_get_cpu_address_width(&paddr_width, &vaddr_width);
	high_gpa = (1ul << paddr_width) - getpagesize();
	if ((unsigned long)DEFAULT_GUEST_PHY_PAGES * getpagesize() > high_gpa) {
		print_skip("No unbacked physical page available");
		exit(KSFT_SKIP);
	}

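	/* Allocate a memory-backed L1 page to serve as the APIC-access page. */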
	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	prepare_virtualize_apic_accesses(vmx, vm, 0);
	vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa);

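	/*
	 * Run L1, handling its ucalls, until it either signals GUEST_DONE or
	 * switches to the unbacked APIC-access address.
	 */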
	while (!done) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
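		/*
		 * Once L1 has programmed the unbacked APIC-access address,
		 * KVM is expected to fail the run with an emulation error
		 * rather than completing the nested VM-Enter.
		 */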
		if (apic_access_addr == high_gpa) {
			TEST_ASSERT(run->exit_reason ==
				    KVM_EXIT_INTERNAL_ERROR,
				    "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n",
				    run->exit_reason,
				    exit_reason_str(run->exit_reason));
			TEST_ASSERT(run->internal.suberror ==
				    KVM_INTERNAL_ERROR_EMULATION,
				    "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
				    run->internal.suberror);
			break;
		}
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			apic_access_addr = uc.args[1];
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
		}
	}
	kvm_vm_free(vm);
	return 0;
}