Lines Matching +full:vm +full:- +full:map in tools/testing/selftests/kvm/memslot_perf_test.c

1 // SPDX-License-Identifier: GPL-2.0
3 * A memslot-related performance benchmark.
36 #define MEM_TEST_SIZE (MEM_SIZE - 4096)
48 #define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - 4096)
50 static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
51 static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
52 static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
53 static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
62 #define MEM_TEST_UNMAP_CHUNK_PAGES (2U << (20 - 12))
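The chunk define above is just 2 MiB expressed in 4 KiB pages: (2U << (20 - 12)) is 2 << 8 = 512 pages. A tiny standalone check of that arithmetic (a hypothetical snippet, not part of the test):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096U
#define MEM_TEST_UNMAP_CHUNK_PAGES (2U << (20 - 12))

int main(void)
{
	/* 2 << 8 == 512 pages; 512 * 4096 bytes == 2 MiB per chunk */
	static_assert(MEM_TEST_UNMAP_CHUNK_PAGES == 512, "chunk math");
	printf("chunk: %u pages = %u bytes\n",
	       MEM_TEST_UNMAP_CHUNK_PAGES,
	       MEM_TEST_UNMAP_CHUNK_PAGES * PAGE_SIZE);
	return 0;
}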
92 struct kvm_vm *vm; member
111 * Technically, we need also for the atomic bool to be address-free, which
115 * all KVM-supported platforms.
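These two comment hits belong to the justification for the test's lock-free sync flags: C11 only guarantees locklessness when ATOMIC_BOOL_LOCK_FREE is 2, which the source asserts nearby. A minimal standalone probe of that property (not from the test itself):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	/* 2 means the type is always lock-free per C11; per the comment
	 * hits above, the test also relies on it being address-free in
	 * practice on all KVM-supported platforms. */
	static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
	printf("ATOMIC_BOOL_LOCK_FREE = %d\n", ATOMIC_BOOL_LOCK_FREE);
	return 0;
}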
132 struct vm_data *vm = data; in vcpu_worker() local
137 run = vcpu_state(vm->vm, VCPU_ID); in vcpu_worker()
139 vcpu_run(vm->vm, VCPU_ID); in vcpu_worker()
141 if (run->exit_reason == KVM_EXIT_IO) { in vcpu_worker()
142 cmd = get_ucall(vm->vm, VCPU_ID, &uc); in vcpu_worker()
150 if (run->exit_reason != KVM_EXIT_MMIO) in vcpu_worker()
153 TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit"); in vcpu_worker()
154 TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read"); in vcpu_worker()
155 TEST_ASSERT(run->mmio.len == 8, in vcpu_worker()
156 "Unexpected exit mmio size = %u", run->mmio.len); in vcpu_worker()
157 TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min && in vcpu_worker()
158 run->mmio.phys_addr <= vm->mmio_gpa_max, in vcpu_worker()
160 run->mmio.phys_addr); in vcpu_worker()
163 if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT) in vcpu_worker()
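Taken together, the vcpu_worker() hits show the exit-validation policy: an MMIO exit is only legal while mmio_ok is set, and it must be an 8-byte write whose physical address falls inside [mmio_gpa_min, mmio_gpa_max]. A toy standalone rendering of that predicate, using a hypothetical mock of the few kvm_run fields the worker inspects:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mock of the kvm_run MMIO fields checked above. */
struct mock_mmio {
	uint64_t phys_addr;
	uint32_t len;
	bool is_write;
};

static bool mmio_exit_ok(const struct mock_mmio *m,
			 uint64_t gpa_min, uint64_t gpa_max)
{
	/* Mirrors the worker's asserts: an 8-byte write in [min, max]. */
	return m->is_write && m->len == 8 &&
	       m->phys_addr >= gpa_min && m->phys_addr <= gpa_max;
}

int main(void)
{
	struct mock_mmio m = { .phys_addr = 0x100000, .len = 8, .is_write = true };

	printf("in window:  %d\n", mmio_exit_ok(&m, 0x100000, 0x1fffff));
	printf("off window: %d\n", mmio_exit_ok(&m, 0x200000, 0x2fffff));
	return 0;
}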
189 TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096, in vm_gpa2hva()
191 gpa -= MEM_GPA; in vm_gpa2hva()
195 slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); in vm_gpa2hva()
196 slotoffs = gpage - (slot * data->pages_per_slot); in vm_gpa2hva()
201 if (slot == data->nslots - 1) in vm_gpa2hva()
202 slotpages = data->npages - slot * data->pages_per_slot; in vm_gpa2hva()
204 slotpages = data->pages_per_slot; in vm_gpa2hva()
208 *rempages = slotpages - slotoffs; in vm_gpa2hva()
211 base = data->hva_slots[slot]; in vm_gpa2hva()
217 TEST_ASSERT(slot < data->nslots, "Too high slot number"); in vm_slot2gpa()
219 return MEM_GPA + slot * data->pages_per_slot * 4096; in vm_slot2gpa()
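vm_gpa2hva() turns a GPA into a slot index plus an in-slot page offset, clamping to the last slot because that slot absorbs the division remainder (the slotpages special case at line 201). The same arithmetic as a standalone demo with made-up sizes (gpa2slot is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: 1000 pages over 7 slots of 142 pages each;
 * the last slot also absorbs the 6-page remainder (148 total). */
#define NPAGES		1000ULL
#define NSLOTS		7ULL
#define PAGES_PER_SLOT	(NPAGES / NSLOTS)	/* 142 */

static void gpa2slot(uint64_t gpage, uint64_t *slot, uint64_t *slotoffs)
{
	/* Clamp to the last slot, which holds the remainder pages. */
	*slot = gpage / PAGES_PER_SLOT;
	if (*slot > NSLOTS - 1)
		*slot = NSLOTS - 1;
	*slotoffs = gpage - *slot * PAGES_PER_SLOT;
}

int main(void)
{
	uint64_t slot, offs;

	gpa2slot(999, &slot, &offs);	/* last page: slot 6, offset 147 */
	printf("page 999 -> slot %llu, offset %llu\n",
	       (unsigned long long)slot, (unsigned long long)offs);
	return 0;
}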
229 data->vm = NULL; in alloc_vm()
230 data->hva_slots = NULL; in alloc_vm()
249 TEST_ASSERT(nslots > 1 || nslots == -1, in prepare_vm()
251 if (nslots != -1) in prepare_vm()
258 data->npages = mempages; in prepare_vm()
259 data->nslots = max_mem_slots - 1; in prepare_vm()
260 data->pages_per_slot = mempages / data->nslots; in prepare_vm()
261 if (!data->pages_per_slot) { in prepare_vm()
266 rempages = mempages % data->nslots; in prepare_vm()
267 data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); in prepare_vm()
268 TEST_ASSERT(data->hva_slots, "malloc() fail"); in prepare_vm()
270 data->vm = vm_create_default(VCPU_ID, mempages, guest_code); in prepare_vm()
273 max_mem_slots - 1, data->pages_per_slot, rempages); in prepare_vm()
279 npages = data->pages_per_slot; in prepare_vm()
280 if (slot == max_mem_slots - 1) in prepare_vm()
283 vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS, in prepare_vm()
290 for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) { in prepare_vm()
294 npages = data->pages_per_slot; in prepare_vm()
295 if (slot == max_mem_slots - 2) in prepare_vm()
298 gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, in prepare_vm()
303 data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr); in prepare_vm()
304 memset(data->hva_slots[slot], 0, npages * 4096); in prepare_vm()
309 virt_map(data->vm, MEM_GPA, MEM_GPA, mempages); in prepare_vm()
312 atomic_init(&sync->start_flag, false); in prepare_vm()
313 atomic_init(&sync->exit_flag, false); in prepare_vm()
314 atomic_init(&sync->sync_flag, false); in prepare_vm()
316 data->mmio_ok = false; in prepare_vm()
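The prepare_vm() hits spell out the slot sizing scheme: the test memory is split evenly across max_mem_slots - 1 slots (pages_per_slot = mempages / nslots) and the remainder is folded into the last slot. A quick standalone check that the per-slot counts sum back to the total (the constants here are illustrative, not the test's):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t mempages = 32896, max_mem_slots = 509;
	const uint64_t nslots = max_mem_slots - 1;
	const uint64_t pages_per_slot = mempages / nslots;
	const uint64_t rempages = mempages % nslots;
	uint64_t slot, total = 0;

	for (slot = 0; slot < nslots; slot++) {
		uint64_t npages = pages_per_slot;

		/* The last slot absorbs the remainder pages. */
		if (slot == nslots - 1)
			npages += rempages;
		total += npages;
	}
	printf("%llu slots x %llu pages + %llu remainder -> %llu (expect %llu)\n",
	       (unsigned long long)nslots, (unsigned long long)pages_per_slot,
	       (unsigned long long)rempages, (unsigned long long)total,
	       (unsigned long long)mempages);
	return 0;
}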
323 pr_info_v("Launching the test VM\n"); in launch_vm()
325 pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data); in launch_vm()
333 kvm_vm_free(data->vm); in free_vm()
334 free(data->hva_slots); in free_vm()
340 pthread_join(data->vcpu_thread, NULL); in wait_guest_exit()
345 atomic_store_explicit(&sync->start_flag, true, memory_order_release); in let_guest_run()
352 while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire)) in guest_spin_until_start()
358 atomic_store_explicit(&sync->exit_flag, true, memory_order_release); in make_guest_exit()
365 return atomic_load_explicit(&sync->exit_flag, memory_order_acquire); in _guest_should_exit()
380 atomic_store_explicit(&sync->sync_flag, true, memory_order_release); in host_perform_sync()
381 while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire)) in host_perform_sync()
397 } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag, in guest_perform_sync()
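host_perform_sync() and guest_perform_sync() form a lock-free rendezvous: the host publishes its work, raises sync_flag with a release store, and spins until the flag drops; the guest waits for the flag and then claims it by compare-exchanging it back to false. A standalone two-thread analogue, assuming pthreads in place of the real vCPU thread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sync_flag;
static int shared_counter;	/* data handed across the sync point */

static void *guest_side(void *arg)
{
	for (int i = 0; i < 3; i++) {
		bool expected;

		/* Wait for the host; acquire pairs with its release store,
		 * so shared_counter below is the value it published. */
		while (!atomic_load_explicit(&sync_flag, memory_order_acquire))
			;
		printf("guest saw counter = %d\n", shared_counter);

		/* Claim the sync point (true -> false), mirroring the
		 * test's weak compare-exchange loop; the release half
		 * orders our read before the host's next publish. */
		expected = true;
		while (!atomic_compare_exchange_weak_explicit(&sync_flag,
				&expected, false, memory_order_acq_rel,
				memory_order_relaxed))
			expected = true;
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;

	atomic_init(&sync_flag, false);
	pthread_create(&thr, NULL, guest_side, NULL);

	for (int i = 0; i < 3; i++) {
		shared_counter = i;	/* publish before the release store */
		atomic_store_explicit(&sync_flag, true, memory_order_release);

		/* Spin until the guest consumes the sync point. */
		while (atomic_load_explicit(&sync_flag, memory_order_acquire))
			;
	}
	pthread_join(thr, NULL);
	return 0;
}

Build with cc -pthread; the acquire/release pairing keeps shared_counter stable at each sync point.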
408 uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); in guest_code_test_memslot_move()
474 * We can afford to access (map) just a small number of pages in guest_code_test_memslot_unmap()
478 * effectively turn this test into a map performance test. in guest_code_test_memslot_unmap()
534 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); in test_memslot_move_prepare()
546 movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1)); in test_memslot_move_prepare()
547 sync->move_area_ptr = (void *)movetestgpa; in test_memslot_move_prepare()
550 data->mmio_ok = true; in test_memslot_move_prepare()
551 data->mmio_gpa_min = movesrcgpa; in test_memslot_move_prepare()
552 data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1; in test_memslot_move_prepare()
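test_memslot_move_prepare() positions the guest test window relative to the slot about to be moved: in the active variant the window starts MEM_TEST_MOVE_SIZE / 2 below the slot base, so its upper half overlaps the moved slot and accesses there can exit as MMIO while the slot is in flight, which is the [mmio_gpa_min, mmio_gpa_max] range armed above. The layout arithmetic alone, standalone (the 64 MiB size and base address are illustrative):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOVE_SIZE	(64UL << 20)	/* illustrative 64 MiB window */

int main(void)
{
	const uint64_t movesrcgpa = 1UL << 32;	/* base of the moved slot */
	bool isactive = true;
	/* Active variant: shift down by half, so the window's upper
	 * half lands inside the slot being moved. */
	uint64_t movetestgpa = movesrcgpa - MOVE_SIZE / (isactive ? 2 : 1);
	uint64_t mmio_min = movesrcgpa;
	uint64_t mmio_max = movesrcgpa + MOVE_SIZE / 2 - 1;

	printf("test window: 0x%" PRIx64 "..0x%" PRIx64 "\n",
	       movetestgpa, movetestgpa + MOVE_SIZE - 1);
	printf("mmio window: 0x%" PRIx64 "..0x%" PRIx64 "\n",
	       mmio_min, mmio_max);
	return 0;
}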
576 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); in test_memslot_move_loop()
577 vm_mem_region_move(data->vm, data->nslots - 1 + 1, in test_memslot_move_loop()
579 vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa); in test_memslot_move_loop()
594 npages = min(npages, count - ctr); in test_memslot_do_unmap()
597 "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64, in test_memslot_do_unmap()
642 MEM_TEST_MAP_SIZE_PAGES / 2 - 1, in test_memslot_map_loop()
659 test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1, in test_memslot_map_loop()
742 uint64_t mem_size = tdata->mem_size ? : MEM_SIZE_PAGES; in test_execute()
749 if (!prepare_vm(data, nslots, maxslots, tdata->guest_code, in test_execute()
757 if (tdata->prepare && in test_execute()
758 !tdata->prepare(data, sync, maxslots)) { in test_execute()
770 if (guest_runtime->tv_sec >= maxtime) in test_execute()
773 tdata->loop(data, sync); in test_execute()
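test_execute() runs the prepare step once, then repeats tdata->loop() until the accumulated guest runtime reaches the maxtime budget in seconds. The skeleton of such a time-budgeted benchmark loop, standalone with CLOCK_MONOTONIC (names are illustrative):

#include <stdio.h>
#include <time.h>

static struct timespec timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };

	if (r.tv_nsec < 0) {
		r.tv_sec--;
		r.tv_nsec += 1000000000L;
	}
	return r;
}

int main(void)
{
	const time_t maxtime = 2;	/* seconds, like the -l option */
	struct timespec start, now, elapsed;
	unsigned long loops = 0;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed = timespec_sub(now, start);
		if (elapsed.tv_sec >= maxtime)
			break;		/* budget exhausted, stop looping */
		loops++;		/* stand-in for tdata->loop(data, sync) */
	}
	printf("%lu loops in %lld.%09ld s\n", loops,
	       (long long)elapsed.tv_sec, elapsed.tv_nsec);
	return 0;
}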
789 .name = "map",
839 …pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r r… in help()
841 pr_info(" -h: print this help screen.\n"); in help()
842 pr_info(" -v: enable verbose mode (not for benchmarking).\n"); in help()
843 pr_info(" -d: enable extra debug checks.\n"); in help()
844 pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n", in help()
845 targs->nslots); in help()
846 pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n", in help()
847 targs->tfirst, NTESTS - 1); in help()
848 pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n", in help()
849 targs->tlast, NTESTS - 1); in help()
850 pr_info(" -l: specify the test length in seconds (currently: %i)\n", in help()
851 targs->seconds); in help()
852 pr_info(" -r: specify the number of runs per test (currently: %i)\n", in help()
853 targs->runs); in help()
865 while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) { in parse_args()
878 targs->nslots = atoi(optarg); in parse_args()
879 if (targs->nslots <= 0 && targs->nslots != -1) { in parse_args()
880 pr_info("Slot count cap has to be positive or -1 for no cap\n"); in parse_args()
885 targs->tfirst = atoi(optarg); in parse_args()
886 if (targs->tfirst < 0) { in parse_args()
887 pr_info("First test to run has to be non-negative\n"); in parse_args()
892 targs->tlast = atoi(optarg); in parse_args()
893 if (targs->tlast < 0 || targs->tlast >= NTESTS) { in parse_args()
894 pr_info("Last test to run has to be non-negative and less than %zu\n", in parse_args()
900 targs->seconds = atoi(optarg); in parse_args()
901 if (targs->seconds < 0) { in parse_args()
902 pr_info("Test length in seconds has to be non-negative\n"); in parse_args()
907 targs->runs = atoi(optarg); in parse_args()
908 if (targs->runs <= 0) { in parse_args()
921 if (targs->tfirst > targs->tlast) { in parse_args()
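parse_args() is a standard getopt() loop over "hvds:f:e:l:r:" followed by range validation, e.g. the slot cap must be positive or -1 and the first test index must not exceed the last. A stripped-down standalone version covering a subset of the options:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	int nslots = -1, seconds = 5, runs = 1, opt;

	while ((opt = getopt(argc, argv, "hs:l:r:")) != -1) {
		switch (opt) {
		case 'h':
			printf("usage: %s [-h] [-s slots] [-l seconds] [-r runs]\n",
			       argv[0]);
			return 0;
		case 's':
			nslots = atoi(optarg);
			if (nslots <= 0 && nslots != -1) {
				fprintf(stderr, "-s must be positive or -1\n");
				return 1;
			}
			break;
		case 'l':
			seconds = atoi(optarg);
			break;
		case 'r':
			runs = atoi(optarg);
			break;
		default:
			return 1;
		}
	}
	printf("nslots=%d seconds=%d runs=%d\n", nslots, seconds, runs);
	return 0;
}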
944 if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, in test_loop()
960 pr_info("No full loops done - too short test time or system too loaded?\n"); in test_loop()
977 if (!data->mem_size && in test_loop()
978 (!rbestslottime->slottimens || in test_loop()
979 result.slottimens < rbestslottime->slottimens)) in test_loop()
981 if (!rbestruntime->runtimens || in test_loop()
982 result.runtimens < rbestruntime->runtimens) in test_loop()
992 .tlast = NTESTS - 1, in main()
993 .nslots = -1, in main()
1004 return -1; in main()
1016 data->name, targs.runs, targs.seconds); in main()