/*
 * KVM_GET/SET_* tests
 *
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Tests for vCPU state save/restore, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "x86.h"
#include "vmx.h"

#define VCPU_ID 5

static bool have_nested_state;

void l2_guest_code(void)
{
	GUEST_SYNC(5);

	/* Exit to L1 */
	vmcall();

	/* L1 has now set up a shadow VMCS for us. */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(10);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(11);

	/* Done, exit to L1 and never come back. */
	vmcall();
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(3);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved. */
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(6);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

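	/* Advance L2's RIP past the 3-byte vmcall instruction it exited on. */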
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

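	/* Enable VMCS shadowing and link in the shadow VMCS. */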
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

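	/* VM entry is expected to fail while the shadow VMCS is current. */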
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(7);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

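	/* Seed GUEST_RIP in the shadow VMCS; L2 reads it back via vmread. */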
	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(8);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

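	/* Make the ordinary VMCS current again and re-enter L2. */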
	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

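	/*
	 * Switch back to the shadow VMCS: L2's vmwrites should be visible
	 * here, and VM entry should still fail on the shadow VMCS.
	 */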
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(12);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}

void guest_code(struct vmx_pages *vmx_pages)
{
	GUEST_SYNC(1);
	GUEST_SYNC(2);

	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct vmx_pages *vmx_pages = NULL;
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	int stage;

	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

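	/* Only exercise nested (VMX) state when KVM advertises support. */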
	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
	} else {
		printf("will skip nested state checks\n");
		vcpu_args_set(vm, VCPU_ID, 1, 0);
	}

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s),\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);
		switch (run->io.port) {
		case GUEST_PORT_ABORT:
			TEST_ASSERT(false, "%s at %s:%lu", (const char *)regs1.rdi,
				    __FILE__, (ulong)regs1.rsi);
			/* NOT REACHED */
		case GUEST_PORT_SYNC:
			break;
		case GUEST_PORT_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
		}

		/* PORT_SYNC is handled here. */
		TEST_ASSERT(!strcmp((const char *)regs1.rdi, "hello") &&
			    regs1.rsi == stage, "Unexpected register values vmexit #%lx, got %lx",
			    (ulong)stage, (ulong)regs1.rsi);

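		/* Snapshot the vCPU state so it can be reloaded into a new VM below. */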
		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);

		/* Restore state in a new VM. */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID, 0, 0);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}