// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_set_nested_state_test
 *
 * Copyright (C) 2019, Google LLC.
 *
 * This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <errno.h>
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/*
 * Mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h. If that value
 * changes this should be updated.
 */
#define VMCS12_REVISION	0x11e57ed0

bool have_evmcs;

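/*
 * Expect-success wrapper: vcpu_nested_state_set() is the asserting variant
 * of the KVM_SET_NESTED_STATE helper, so any failure aborts the test.
 */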
void test_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *state)
{
	vcpu_nested_state_set(vcpu, state);
}

void test_nested_state_expect_errno(struct kvm_vcpu *vcpu,
				    struct kvm_nested_state *state,
				    int expected_errno)
{
	int rv;

	rv = __vcpu_nested_state_set(vcpu, state);
	TEST_ASSERT(rv == -1 && errno == expected_errno,
		    "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
		    strerror(expected_errno), expected_errno, rv, strerror(errno),
		    errno);
}

void test_nested_state_expect_einval(struct kvm_vcpu *vcpu,
				     struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vcpu, state, EINVAL);
}

void test_nested_state_expect_efault(struct kvm_vcpu *vcpu,
				     struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vcpu, state, EFAULT);
}

void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
				u32 vmcs12_revision)
{
	/* Set revision_id in vmcs12 to vmcs12_revision. */
	memcpy(&state->data, &vmcs12_revision, sizeof(u32));
}

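/*
 * Build a minimal generic nested state header: both nested flags set,
 * format 0 (VMX) and size covering only the fixed-size header. Used for
 * the pre-VMX sanity checks in main().
 */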
void set_default_state(struct kvm_nested_state *state)
{
	memset(state, 0, sizeof(*state));
	state->flags = KVM_STATE_NESTED_RUN_PENDING |
		       KVM_STATE_NESTED_GUEST_MODE;
	state->format = 0;
	state->size = sizeof(*state);
}

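/*
 * Build a nested VMX state blob of the given size with plausible defaults:
 * page-aligned vmxon/vmcs12 addresses, no SMM flags and the expected vmcs12
 * revision id. The individual tests below tweak single fields from here.
 */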
void set_default_vmx_state(struct kvm_nested_state *state, int size)
{
	memset(state, 0, size);
	if (have_evmcs)
		state->flags = KVM_STATE_NESTED_EVMCS;
	state->format = 0;
	state->size = size;
	state->hdr.vmx.vmxon_pa = 0x1000;
	state->hdr.vmx.vmcs12_pa = 0x2000;
	state->hdr.vmx.smm.flags = 0;
	set_revision_id_for_vmcs12(state, VMCS12_REVISION);
}

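/*
 * Exercise the VMX-specific consistency checks of KVM_SET_NESTED_STATE,
 * first with VMX disabled in the guest CPUID and then with it enabled.
 */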
void test_vmx_nested_state(struct kvm_vcpu *vcpu)
{
	/* Add a page for VMCS12. */
	const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
	struct kvm_nested_state *state =
		(struct kvm_nested_state *)malloc(state_sz);

	/* The format must be set to 0. 0 for VMX, 1 for SVM. */
	set_default_vmx_state(state, state_sz);
	state->format = 1;
	test_nested_state_expect_einval(vcpu, state);

	/*
	 * We cannot virtualize anything if the guest does not have VMX
	 * enabled.
	 */
	set_default_vmx_state(state, state_sz);
	test_nested_state_expect_einval(vcpu, state);

	/*
	 * We cannot virtualize anything if the guest does not have VMX
	 * enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
	 * is set to -1ull, but the flags must be zero.
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	test_nested_state_expect_einval(vcpu, state);

	state->hdr.vmx.vmcs12_pa = -1ull;
	state->flags = KVM_STATE_NESTED_EVMCS;
	test_nested_state_expect_einval(vcpu, state);

	state->flags = 0;
	test_nested_state(vcpu, state);

	/* Enable VMX in the guest CPUID. */
	vcpu_set_cpuid_feature(vcpu, X86_FEATURE_VMX);

	/*
	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
	 * setting the nested state but flags other than eVMCS must be clear.
	 * The eVMCS flag can be set if the enlightened VMCS capability has
	 * been enabled.
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	state->hdr.vmx.vmcs12_pa = -1ull;
	test_nested_state_expect_einval(vcpu, state);

	state->flags &= KVM_STATE_NESTED_EVMCS;
	if (have_evmcs) {
		test_nested_state_expect_einval(vcpu, state);
		vcpu_enable_evmcs(vcpu);
	}
	test_nested_state(vcpu, state);

	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
	state->hdr.vmx.smm.flags = 1;
	test_nested_state_expect_einval(vcpu, state);

	/* Invalid flags are rejected. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.flags = ~0;
	test_nested_state_expect_einval(vcpu, state);

	/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	state->flags = 0;
	test_nested_state_expect_einval(vcpu, state);

	/* It is invalid to have vmxon_pa set to a non-page aligned address. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = 1;
	test_nested_state_expect_einval(vcpu, state);

	/*
	 * It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
	 * KVM_STATE_NESTED_GUEST_MODE set together.
	 */
	set_default_vmx_state(state, state_sz);
	state->flags = KVM_STATE_NESTED_GUEST_MODE |
		       KVM_STATE_NESTED_RUN_PENDING;
	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
	test_nested_state_expect_einval(vcpu, state);

	/*
	 * It is invalid to have any of the SMM flags set besides:
	 *	KVM_STATE_NESTED_SMM_GUEST_MODE
	 *	KVM_STATE_NESTED_SMM_VMXON
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
				     KVM_STATE_NESTED_SMM_VMXON);
	test_nested_state_expect_einval(vcpu, state);

	/* Outside SMM, SMM flags must be zero. */
	set_default_vmx_state(state, state_sz);
	state->flags = 0;
	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
	test_nested_state_expect_einval(vcpu, state);

	/*
	 * Size must be large enough to fit kvm_nested_state and vmcs12
	 * if VMCS12 physical address is set
	 */
	set_default_vmx_state(state, state_sz);
	state->size = sizeof(*state);
	state->flags = 0;
	test_nested_state_expect_einval(vcpu, state);

	set_default_vmx_state(state, state_sz);
	state->size = sizeof(*state);
	state->flags = 0;
	state->hdr.vmx.vmcs12_pa = -1;
	test_nested_state(vcpu, state);

	/*
	 * KVM_SET_NESTED_STATE succeeds with invalid VMCS contents as long
	 * as L2 is not running.
	 */
	set_default_vmx_state(state, state_sz);
	state->flags = 0;
	test_nested_state(vcpu, state);

	/* Invalid flags are rejected, even if no VMCS loaded. */
	set_default_vmx_state(state, state_sz);
	state->size = sizeof(*state);
	state->flags = 0;
	state->hdr.vmx.vmcs12_pa = -1;
	state->hdr.vmx.flags = ~0;
	test_nested_state_expect_einval(vcpu, state);

	/* vmxon_pa cannot be the same address as vmcs_pa. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = 0;
	state->hdr.vmx.vmcs12_pa = 0;
	test_nested_state_expect_einval(vcpu, state);

	/*
	 * Test that if we leave nesting the state reflects that when we get
	 * it again.
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	state->hdr.vmx.vmcs12_pa = -1ull;
	state->flags = 0;
	test_nested_state(vcpu, state);
	vcpu_nested_state_get(vcpu, state);
	TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
		    "Size must be between %ld and %d. The size returned was %d.",
		    sizeof(*state), state_sz, state->size);
	TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
	TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");

	free(state);
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_nested_state state;
	struct kvm_vcpu *vcpu;

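	/*
	 * kvm_check_cap() returns the raw KVM_CHECK_EXTENSION value for the
	 * capability; any non-zero value means enlightened VMCS is available.
	 */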
	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));

	/*
	 * This test only covers the VMX format of nested state, so skip
	 * unless nested VMX is supported.
	 */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	/*
	 * First run tests with VMX disabled to check error handling.
	 */
	vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_VMX);

	/* Passing a NULL kvm_nested_state causes an EFAULT. */
	test_nested_state_expect_efault(vcpu, NULL);

	/* 'size' cannot be smaller than sizeof(kvm_nested_state). */
	set_default_state(&state);
	state.size = 0;
	test_nested_state_expect_einval(vcpu, &state);

	/*
	 * Setting the flags 0xf fails the flags check. The only flags that
	 * can be used are:
	 *	KVM_STATE_NESTED_GUEST_MODE
	 *	KVM_STATE_NESTED_RUN_PENDING
	 *	KVM_STATE_NESTED_EVMCS
	 */
	set_default_state(&state);
	state.flags = 0xf;
	test_nested_state_expect_einval(vcpu, &state);

	/*
	 * If KVM_STATE_NESTED_RUN_PENDING is set then
	 * KVM_STATE_NESTED_GUEST_MODE has to be set as well.
	 */
	set_default_state(&state);
	state.flags = KVM_STATE_NESTED_RUN_PENDING;
	test_nested_state_expect_einval(vcpu, &state);

	test_vmx_nested_state(vcpu);

	kvm_vm_free(vm);
	return 0;
}