// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/compiler.h>

#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>

/*
 * s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
 * 2MB sized and aligned region so that the initial region corresponds to
 * exactly one large page.
 */
#define MEM_REGION_SIZE		0x200000

#ifdef __x86_64__
/*
 * Somewhat arbitrary location and slot, intended to not overlap anything.
 */
#define MEM_REGION_GPA		0xc0000000
#define MEM_REGION_SLOT		10

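/*
 * Arbitrary value the host returns on MMIO reads, i.e. what the guest sees
 * when it accesses the region while its memslot is invalid or deleted.
 */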
static const uint64_t MMIO_VAL = 0xbeefull;

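/*
 * Guest RIPs bracketing the final spin loop in
 * guest_code_delete_memory_region(), stashed in .rodata via inline asm so
 * that the host can verify where the vCPU was when its code memslot vanished.
 */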
extern const uint64_t final_rip_start;
extern const uint64_t final_rip_end;

static sem_t vcpu_ready;

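/*
 * Spin until the value at MEM_REGION_GPA changes away from @spin_val, then
 * sync with the host and return the newly observed value.
 */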
static inline uint64_t guest_spin_on_val(uint64_t spin_val)
{
	uint64_t val;

	do {
		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
	} while (val == spin_val);

	GUEST_SYNC(0);
	return val;
}

static void *vcpu_worker(void *data)
{
	struct kvm_vcpu *vcpu = data;
	struct kvm_run *run = vcpu->run;
	struct ucall uc;
	uint64_t cmd;

	/*
	 * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
	 * which will occur if the guest attempts to access a memslot after it
	 * has been deleted or while it is being moved.
	 */
	while (1) {
		vcpu_run(vcpu);

		if (run->exit_reason == KVM_EXIT_IO) {
			cmd = get_ucall(vcpu, &uc);
			if (cmd != UCALL_SYNC)
				break;

			sem_post(&vcpu_ready);
			continue;
		}

		if (run->exit_reason != KVM_EXIT_MMIO)
			break;

		TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
		TEST_ASSERT(run->mmio.len == 8,
			    "Unexpected exit mmio size = %u", run->mmio.len);

		TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
			    "Unexpected exit mmio address = 0x%llx",
			    run->mmio.phys_addr);
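		/* Complete the read by returning MMIO_VAL to the guest. */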
		memcpy(run->mmio.data, &MMIO_VAL, 8);
	}

	if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
		REPORT_GUEST_ASSERT_1(uc, "val = %lu");

	return NULL;
}

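/*
 * Wait for the vCPU thread to signal (via GUEST_SYNC) that it's ready, with
 * a two second timeout so a wedged vCPU fails the test instead of hanging it.
 */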
static void wait_for_vcpu(void)
{
	struct timespec ts;

	TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
		    "clock_gettime() failed: %d\n", errno);

	ts.tv_sec += 2;
	TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
		    "sem_timedwait() failed: %d\n", errno);

	/* Wait for the vCPU thread to reenter the guest. */
	usleep(100000);
}

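/*
 * Create a VM with a single vCPU running @guest_code, add the test memslot
 * backed by THP, zero its first two pages, and spin up the vCPU worker thread.
 */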
static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread,
			       void *guest_code)
{
	struct kvm_vm *vm;
	uint64_t *hva;
	uint64_t gpa;

	vm = vm_create_with_one_vcpu(vcpu, guest_code);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
				    MEM_REGION_GPA, MEM_REGION_SLOT,
				    MEM_REGION_SIZE / getpagesize(), 0);

	/*
	 * Allocate and map two pages so that the GPA accessed by guest_code()
	 * stays valid across the memslot move.
	 */
	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");

	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);

	/* Ditto for the host mapping so that both pages can be zeroed. */
	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
	memset(hva, 0, 2 * 4096);

	pthread_create(vcpu_thread, NULL, vcpu_worker, *vcpu);

	/* Ensure the guest thread is spun up. */
	wait_for_vcpu();

	return vm;
}

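/*
 * Guest side of the MOVE test: tracks the host's progress via the values it
 * observes at MEM_REGION_GPA as the memslot is misaligned and then restored.
 */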
static void guest_code_move_memory_region(void)
{
	uint64_t val;

	GUEST_SYNC(0);

	/*
	 * Spin until the memory region starts getting moved to a
	 * misaligned address.
	 * Every region move may or may not trigger MMIO, as the
	 * window where the memslot is invalid is usually quite small.
	 */
	val = guest_spin_on_val(0);
	GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);

	/* Spin until the misaligning memory region move completes. */
	val = guest_spin_on_val(MMIO_VAL);
	GUEST_ASSERT_1(val == 1 || val == 0, val);

	/* Spin until the memory region starts to get re-aligned. */
	val = guest_spin_on_val(0);
	GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);

	/* Spin until the re-aligning memory region move completes. */
	val = guest_spin_on_val(MMIO_VAL);
	GUEST_ASSERT_1(val == 1, val);

	GUEST_DONE();
}

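/*
 * Host side of the MOVE test: shift the memslot's base GPA down by one page
 * so that the guest's accesses resolve to a different host page, then restore
 * the original base.
 */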
static void test_move_memory_region(void)
{
	pthread_t vcpu_thread;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t *hva;

	vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);

	hva = addr_gpa2hva(vm, MEM_REGION_GPA);

	/*
	 * Shift the region's base GPA.  The guest should not see "2" as the
	 * hva->gpa translation is misaligned, i.e. the guest is accessing a
	 * different host pfn.
	 */
	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
	WRITE_ONCE(*hva, 2);

	/*
	 * The guest _might_ see an invalid memslot and trigger MMIO, but it's
	 * a tiny window.  Spin and defer the sync until the memslot is
	 * restored and guest behavior is once again deterministic.
	 */
	usleep(100000);

	/*
	 * Note, value in memory needs to be changed *before* restoring the
	 * memslot, else the guest could race the update and see "2".
	 */
	WRITE_ONCE(*hva, 1);

	/* Restore the original base, the guest should see "1". */
	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
	wait_for_vcpu();
	/* Deferred sync from when the memslot was misaligned (above). */
	wait_for_vcpu();

	pthread_join(vcpu_thread, NULL);

	kvm_vm_free(vm);
}

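/*
 * Guest side of the DELETE test: each deletion turns the region into MMIO,
 * each recreation hands back zero-filled memory.
 */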
static void guest_code_delete_memory_region(void)
{
	uint64_t val;

	GUEST_SYNC(0);

	/* Spin until the memory region is deleted. */
	val = guest_spin_on_val(0);
	GUEST_ASSERT_1(val == MMIO_VAL, val);

	/* Spin until the memory region is recreated. */
	val = guest_spin_on_val(MMIO_VAL);
	GUEST_ASSERT_1(val == 0, val);

	/* Spin until the memory region is deleted. */
	val = guest_spin_on_val(0);
	GUEST_ASSERT_1(val == MMIO_VAL, val);

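	/*
	 * Stash the RIP range spanning the final spin loop in .rodata so that
	 * the host can verify, after deleting the code memslot, that the vCPU
	 * stopped inside the loop (see test_delete_memory_region()).
	 */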
	asm("1:\n\t"
	    ".pushsection .rodata\n\t"
	    ".global final_rip_start\n\t"
	    "final_rip_start: .quad 1b\n\t"
	    ".popsection");

	/* Spin indefinitely (until the code memslot is deleted). */
	guest_spin_on_val(MMIO_VAL);

	asm("1:\n\t"
	    ".pushsection .rodata\n\t"
	    ".global final_rip_end\n\t"
	    "final_rip_end: .quad 1b\n\t"
	    ".popsection");

	GUEST_ASSERT_1(0, 0);
}

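/*
 * Host side of the DELETE test: delete and recreate the test memslot while
 * the guest pokes it, then delete the memslot holding the guest's code to
 * force an abnormal exit.
 */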
static void test_delete_memory_region(void)
{
	pthread_t vcpu_thread;
	struct kvm_vcpu *vcpu;
	struct kvm_regs regs;
	struct kvm_run *run;
	struct kvm_vm *vm;

	vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);

	/* Delete the memory region, the guest should not die. */
	vm_mem_region_delete(vm, MEM_REGION_SLOT);
	wait_for_vcpu();

	/* Recreate the memory region.  The guest should see "0". */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
				    MEM_REGION_GPA, MEM_REGION_SLOT,
				    MEM_REGION_SIZE / getpagesize(), 0);
	wait_for_vcpu();

	/* Delete the region again so that there's only one memslot left. */
	vm_mem_region_delete(vm, MEM_REGION_SLOT);
	wait_for_vcpu();

	/*
	 * Delete the primary memslot.  This should cause an emulation error or
	 * shutdown due to the page tables getting nuked.
	 */
	vm_mem_region_delete(vm, 0);

	pthread_join(vcpu_thread, NULL);

	run = vcpu->run;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
		    run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
		    "Unexpected exit reason = %d", run->exit_reason);

	vcpu_regs_get(vcpu, &regs);

	/*
	 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized
	 * already, so the instruction pointer would point to the reset vector.
	 */
	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
		TEST_ASSERT(regs.rip >= final_rip_start &&
			    regs.rip < final_rip_end,
			    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
			    final_rip_start, final_rip_end, regs.rip);

	kvm_vm_free(vm);
}

static void test_zero_memory_regions(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;

	pr_info("Testing KVM_RUN with zero added memory regions\n");

	vm = vm_create_barebones();
	vcpu = __vm_vcpu_add(vm, 0);

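	/*
	 * Cap the number of MMU pages at an arbitrary small value, then run
	 * the vCPU; with no memslots defined, KVM_RUN should fail cleanly
	 * with an internal error rather than crash.
	 */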
	vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
	vcpu_run(vcpu);

	run = vcpu->run;
	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
		    "Unexpected exit_reason = %u\n", run->exit_reason);

	kvm_vm_free(vm);
}
#endif /* __x86_64__ */

/*
 * Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that
 * any attempt to add further slots fails.
 */
static void test_add_max_memory_regions(void)
{
	int ret;
	struct kvm_vm *vm;
	uint32_t max_mem_slots;
	uint32_t slot;
	void *mem, *mem_aligned, *mem_extra;
	size_t alignment;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	TEST_ASSERT(max_mem_slots > 0,
		    "KVM_CAP_NR_MEMSLOTS should be greater than 0");
	pr_info("Allowed number of memory slots: %i\n", max_mem_slots);

	vm = vm_create_barebones();

	/* Check that memory slots can be added up to the maximum allowed */
	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);

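	/*
	 * Over-allocate by 'alignment' bytes so that the base address can be
	 * rounded up to the required alignment (1M on s390x, see above).
	 */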
	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
		   PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));

	for (slot = 0; slot < max_mem_slots; slot++)
		vm_set_user_memory_region(vm, slot, 0,
					  ((uint64_t)slot * MEM_REGION_SIZE),
					  MEM_REGION_SIZE,
					  mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);

	/* Check that memory slots cannot be added beyond the limit */
	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");

	ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
					  (uint64_t)max_mem_slots * MEM_REGION_SIZE,
					  MEM_REGION_SIZE, mem_extra);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Adding one more memory slot should fail with EINVAL");

	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
	munmap(mem_extra, MEM_REGION_SIZE);
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
#ifdef __x86_64__
	int i, loops;
#endif

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

#ifdef __x86_64__
	/*
	 * FIXME: the zero-memslot test fails on aarch64 and s390x because
	 * KVM_RUN fails with ENOEXEC or EFAULT.
	 */
	test_zero_memory_regions();
#endif

	test_add_max_memory_regions();

#ifdef __x86_64__
	if (argc > 1)
		loops = atoi(argv[1]);
	else
		loops = 10;

	pr_info("Testing MOVE of in-use region, %d loops\n", loops);
	for (i = 0; i < loops; i++)
		test_move_memory_region();

	pr_info("Testing DELETE of in-use region, %d loops\n", loops);
	for (i = 0; i < loops; i++)
		test_delete_memory_region();
#endif

	return 0;
}