Lines Matching +full:memory +full:- +full:region

1 // SPDX-License-Identifier: GPL-2.0-only
38 * flags - The flags to pass when opening KVM_DEV_PATH.
96 * cap - Capability
128 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
133 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
137 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); in vm_open()
138 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); in vm_open()
144 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", in vm_guest_mode_string()
145 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", in vm_guest_mode_string()
146 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", in vm_guest_mode_string()
147 [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages", in vm_guest_mode_string()
148 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", in vm_guest_mode_string()
149 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", in vm_guest_mode_string()
150 [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages", in vm_guest_mode_string()
151 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", in vm_guest_mode_string()
152 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", in vm_guest_mode_string()
153 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", in vm_guest_mode_string()
154 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", in vm_guest_mode_string()
155 [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages", in vm_guest_mode_string()
156 [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages", in vm_guest_mode_string()
157 [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages", in vm_guest_mode_string()
158 [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages", in vm_guest_mode_string()
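
A minimal sketch (not part of the listing above) of how a test could dump this mode table at runtime. It assumes vm_guest_mode_string() and the NUM_VM_MODES enum terminator declared in the selftests' kvm_util headers.

#include <stdio.h>
#include "kvm_util.h"

static void print_guest_mode_names(void)
{
	uint32_t mode;

	for (mode = 0; mode < NUM_VM_MODES; mode++)
		printf("%2u: %s\n", mode, vm_guest_mode_string(mode));
}
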
196 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in ____vm_create()
198 INIT_LIST_HEAD(&vm->vcpus); in ____vm_create()
199 vm->regions.gpa_tree = RB_ROOT; in ____vm_create()
200 vm->regions.hva_tree = RB_ROOT; in ____vm_create()
201 hash_init(vm->regions.slot_hash); in ____vm_create()
203 vm->mode = mode; in ____vm_create()
204 vm->type = 0; in ____vm_create()
206 vm->pa_bits = vm_guest_mode_params[mode].pa_bits; in ____vm_create()
207 vm->va_bits = vm_guest_mode_params[mode].va_bits; in ____vm_create()
208 vm->page_size = vm_guest_mode_params[mode].page_size; in ____vm_create()
209 vm->page_shift = vm_guest_mode_params[mode].page_shift; in ____vm_create()
212 switch (vm->mode) { in ____vm_create()
214 vm->pgtable_levels = 4; in ____vm_create()
217 vm->pgtable_levels = 3; in ____vm_create()
220 vm->pgtable_levels = 4; in ____vm_create()
223 vm->pgtable_levels = 3; in ____vm_create()
227 vm->pgtable_levels = 4; in ____vm_create()
231 vm->pgtable_levels = 3; in ____vm_create()
236 vm->pgtable_levels = 4; in ____vm_create()
239 vm->pgtable_levels = 3; in ____vm_create()
243 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in ____vm_create()
245 * Ignore KVM support for 5-level paging (vm->va_bits == 57), in ____vm_create()
249 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in ____vm_create()
251 vm->va_bits); in ____vm_create()
253 vm->pa_bits); in ____vm_create()
254 vm->pgtable_levels = 4; in ____vm_create()
255 vm->va_bits = 48; in ____vm_create()
257 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms"); in ____vm_create()
261 vm->pgtable_levels = 5; in ____vm_create()
264 vm->pgtable_levels = 5; in ____vm_create()
271 if (vm->pa_bits != 40) in ____vm_create()
272 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in ____vm_create()
277 /* Limit to VA-bit canonical virtual addresses. */ in ____vm_create()
278 vm->vpages_valid = sparsebit_alloc(); in ____vm_create()
279 sparsebit_set_num(vm->vpages_valid, in ____vm_create()
280 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in ____vm_create()
281 sparsebit_set_num(vm->vpages_valid, in ____vm_create()
282 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in ____vm_create()
283 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in ____vm_create()
285 /* Limit physical addresses to PA-bits. */ in ____vm_create()
286 vm->max_gfn = vm_compute_max_gfn(vm); in ____vm_create()
288 /* Allocate and setup memory for guest. */ in ____vm_create()
289 vm->vpages_mapped = sparsebit_alloc(); in ____vm_create()
307 "nr_vcpus = %d too large for host, max-vcpus = %d", in vm_nr_pages_required()
312 * test code and other per-VM assets that will be loaded into memslot0. in vm_nr_pages_required()
316 /* Account for the per-vCPU stacks on behalf of the test. */ in vm_nr_pages_required()
321 * maximum page table size for a memory region will be when the in vm_nr_pages_required()
353 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
354 * nr_vcpus - VCPU count
355 * extra_mem_pages - Non-slot0 physical memory total size
356 * guest_code - Guest entry point
357 * vcpuids - VCPU IDs
366 * no real memory allocation for non-slot0 memory in this function.
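
The fragments above come from the VM construction path; a hedged sketch of how a typical selftest reaches it, assuming the vm_create_with_one_vcpu() wrapper from the same library (guest_code() is a placeholder for the test's guest-side logic):

#include "test_util.h"
#include "kvm_util.h"

static void guest_code(void)
{
	for (;;)
		;	/* real tests run their guest-side logic (and a ucall exit) here */
}

static struct kvm_vm *make_test_vm(struct kvm_vcpu **vcpu)
{
	/* Default guest mode; memslot0 is sized for the test binary and one vCPU stack. */
	return vm_create_with_one_vcpu(vcpu, guest_code);
}
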
403 * vm - VM that has been released before
408 * global state, such as the irqchip and the memory regions that are mapped
414 struct userspace_mem_region *region; in kvm_vm_restart() local
417 if (vmp->has_irqchip) in kvm_vm_restart()
420 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
421 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in kvm_vm_restart()
426 ret, errno, region->region.slot, in kvm_vm_restart()
427 region->region.flags, in kvm_vm_restart()
428 region->region.guest_phys_addr, in kvm_vm_restart()
429 region->region.memory_size); in kvm_vm_restart()
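
For reference, a hedged sketch (plain KVM API, not a helper from this file) of the single ioctl that the restart loop above replays for every saved region; "vm_fd" is assumed to be a KVM_CREATE_VM fd and "backing" a page-aligned host allocation of at least "size" bytes:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_user_memory_region(int vm_fd, uint32_t slot, uint32_t flags,
				  uint64_t gpa, uint64_t size, void *backing)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot,
		.flags = flags,			/* e.g. KVM_MEM_LOG_DIRTY_PAGES */
		.guest_phys_addr = gpa,
		.memory_size = size,		/* 0 would delete the slot */
		.userspace_addr = (uintptr_t)backing,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
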
447 * Userspace Memory Region Find
450 * vm - Virtual Machine
451 * start - Starting VM physical address
452 * end - Ending VM physical address, inclusive.
457 * Pointer to overlapping region, NULL if no such region.
459 * Searches for a region with any physical memory that overlaps with
463 * region exists.
470 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
471 struct userspace_mem_region *region = in userspace_mem_region_find() local
473 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
474 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
475 + region->region.memory_size - 1; in userspace_mem_region_find()
477 return region; in userspace_mem_region_find()
480 node = node->rb_left; in userspace_mem_region_find()
482 node = node->rb_right; in userspace_mem_region_find()
489 * KVM Userspace Memory Region Find
492 * vm - Virtual Machine
493 * start - Starting VM physical address
494 * end - Ending VM physical address, inclusive.
499 * Pointer to overlapping region, NULL if no such region.
502 * the memslot data structure for a given range of guest physical memory.
508 struct userspace_mem_region *region; in kvm_userspace_memory_region_find() local
510 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
511 if (!region) in kvm_userspace_memory_region_find()
514 return &region->region; in kvm_userspace_memory_region_find()
526 * vcpu - VCPU to remove
538 if (vcpu->dirty_gfns) { in vm_vcpu_rm()
539 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
541 vcpu->dirty_gfns = NULL; in vm_vcpu_rm()
544 ret = munmap(vcpu->run, vcpu_mmap_sz()); in vm_vcpu_rm()
547 ret = close(vcpu->fd); in vm_vcpu_rm()
550 list_del(&vcpu->list); in vm_vcpu_rm()
561 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list) in kvm_vm_release()
564 ret = close(vmp->fd); in kvm_vm_release()
567 ret = close(vmp->kvm_fd); in kvm_vm_release()
572 struct userspace_mem_region *region, in __vm_mem_region_delete() argument
578 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
579 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
580 hash_del(&region->slot_node); in __vm_mem_region_delete()
583 region->region.memory_size = 0; in __vm_mem_region_delete()
584 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
586 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
587 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
590 free(region); in __vm_mem_region_delete()
600 struct userspace_mem_region *region; in kvm_vm_free() local
606 if (vmp->stats_fd) { in kvm_vm_free()
607 free(vmp->stats_desc); in kvm_vm_free()
608 close(vmp->stats_fd); in kvm_vm_free()
612 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
613 __vm_mem_region_delete(vmp, region, false); in kvm_vm_free()
616 sparsebit_free(&vmp->vpages_valid); in kvm_vm_free()
617 sparsebit_free(&vmp->vpages_mapped); in kvm_vm_free()
634 TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd)); in kvm_memfd_alloc()
646 * Memory Compare, host virtual to guest virtual
649 * hva - Starting host virtual address
650 * vm - Virtual Machine
651 * gva - Starting guest virtual address
652 * len - number of bytes to compare
689 amt = len - offset; in kvm_memcmp_hva_gva()
690 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
691 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
692 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
693 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
695 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
696 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
709 * No mismatch found. Let the caller know the two memory in kvm_memcmp_hva_gva()
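
A hedged usage sketch for the comparison helper described above, assuming the buffer at "gva" was set up with vm_vaddr_alloc() so that both a host mapping and a guest mapping exist:

#include "test_util.h"
#include "kvm_util.h"

static void check_guest_copy(struct kvm_vm *vm, void *hva, vm_vaddr_t gva,
			     size_t len)
{
	/* kvm_memcmp_hva_gva() follows memcmp() semantics: 0 means equal. */
	TEST_ASSERT(!kvm_memcmp_hva_gva(hva, vm, gva, len),
		    "Host and guest views of the buffer differ");
}
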
716 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
720 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { in vm_userspace_mem_region_gpa_insert()
725 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
726 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
727 cur = &(*cur)->rb_left; in vm_userspace_mem_region_gpa_insert()
729 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
730 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
731 "Duplicate GPA in region tree"); in vm_userspace_mem_region_gpa_insert()
733 cur = &(*cur)->rb_right; in vm_userspace_mem_region_gpa_insert()
737 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
738 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
742 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
746 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { in vm_userspace_mem_region_hva_insert()
751 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
752 cur = &(*cur)->rb_left; in vm_userspace_mem_region_hva_insert()
754 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
755 cregion->host_mem, in vm_userspace_mem_region_hva_insert()
756 "Duplicate HVA in region tree"); in vm_userspace_mem_region_hva_insert()
758 cur = &(*cur)->rb_right; in vm_userspace_mem_region_hva_insert()
762 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
763 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
770 struct kvm_userspace_memory_region region = { in __vm_set_user_memory_region() local
778 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
791 * VM Userspace Memory Region Add
794 * vm - Virtual Machine
795 * src_type - Storage source for this region.
796 * NULL to use anonymous memory.
797 * guest_paddr - Starting guest physical address
798 * slot - KVM region slot
799 * npages - Number of physical pages
800 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
806 * Allocates a memory area of the number of pages specified by npages
808 * given by guest_paddr. The region is created with a KVM region slot
810 * region is created with the flags given by flags.
818 struct userspace_mem_region *region; in vm_userspace_mem_region_add() local
822 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_userspace_mem_region_add()
824 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_userspace_mem_region_add()
826 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
828 " guest_paddr: 0x%lx vm->page_size: 0x%x", in vm_userspace_mem_region_add()
829 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
830 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
831 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
834 " vm->max_gfn: 0x%lx vm->page_size: 0x%x", in vm_userspace_mem_region_add()
835 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
838 * Confirm a mem region with an overlapping address doesn't in vm_userspace_mem_region_add()
841 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_userspace_mem_region_add()
842 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
843 if (region != NULL) in vm_userspace_mem_region_add()
849 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
850 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
851 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
853 /* Confirm no region with the requested slot already exists. */ in vm_userspace_mem_region_add()
854 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
856 if (region->region.slot != slot) in vm_userspace_mem_region_add()
859 TEST_FAIL("A mem region with the requested slot " in vm_userspace_mem_region_add()
864 region->region.slot, in vm_userspace_mem_region_add()
865 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
866 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
869 /* Allocate and initialize new mem region structure. */ in vm_userspace_mem_region_add()
870 region = calloc(1, sizeof(*region)); in vm_userspace_mem_region_add()
871 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_userspace_mem_region_add()
872 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
892 /* Add enough memory to align up if necessary */ in vm_userspace_mem_region_add()
894 region->mmap_size += alignment; in vm_userspace_mem_region_add()
896 region->fd = -1; in vm_userspace_mem_region_add()
898 region->fd = kvm_memfd_alloc(region->mmap_size, in vm_userspace_mem_region_add()
901 region->mmap_start = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
903 vm_mem_backing_src_alias(src_type)->flag, in vm_userspace_mem_region_add()
904 region->fd, 0); in vm_userspace_mem_region_add()
905 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_userspace_mem_region_add()
909 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_userspace_mem_region_add()
911 region->mmap_start, backing_src_pagesz); in vm_userspace_mem_region_add()
914 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_userspace_mem_region_add()
919 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
922 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
923 vm_mem_backing_src_alias(src_type)->name); in vm_userspace_mem_region_add()
926 region->unused_phy_pages = sparsebit_alloc(); in vm_userspace_mem_region_add()
927 sparsebit_set_num(region->unused_phy_pages, in vm_userspace_mem_region_add()
928 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
929 region->region.slot = slot; in vm_userspace_mem_region_add()
930 region->region.flags = flags; in vm_userspace_mem_region_add()
931 region->region.guest_phys_addr = guest_paddr; in vm_userspace_mem_region_add()
932 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
933 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_userspace_mem_region_add()
934 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
940 guest_paddr, (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
943 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
944 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
945 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
947 /* If shared memory, create an alias. */ in vm_userspace_mem_region_add()
948 if (region->fd >= 0) { in vm_userspace_mem_region_add()
949 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
951 vm_mem_backing_src_alias(src_type)->flag, in vm_userspace_mem_region_add()
952 region->fd, 0); in vm_userspace_mem_region_add()
953 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_userspace_mem_region_add()
957 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_userspace_mem_region_add()
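
A hedged usage sketch for the region-add helper documented above: back a 64 MiB guest physical range at 1 GiB with anonymous memory in slot 1 and enable dirty logging. The slot number and addresses are arbitrary example values.

#include "test_util.h"
#include "kvm_util.h"

#define EXAMPLE_SLOT	1
#define EXAMPLE_GPA	(1UL << 30)
#define EXAMPLE_SIZE	(64UL << 20)

static void add_example_region(struct kvm_vm *vm)
{
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, EXAMPLE_GPA,
				    EXAMPLE_SLOT, EXAMPLE_SIZE / vm->page_size,
				    KVM_MEM_LOG_DIRTY_PAGES);
}
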
962 * Memslot to region
965 * vm - Virtual Machine
966 * memslot - KVM memory slot ID
971 * Pointer to memory region structure that describes the memory region
972 * using kvm memory slot ID given by memslot. TEST_ASSERT failure
973 * on error (e.g. currently no memory region using memslot as a KVM
974 * memory slot ID).
979 struct userspace_mem_region *region; in memslot2region() local
981 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
983 if (region->region.slot == memslot) in memslot2region()
984 return region; in memslot2region()
986 fprintf(stderr, "No mem region with the requested slot found,\n" in memslot2region()
988 fputs("---- vm dump ----\n", stderr); in memslot2region()
990 TEST_FAIL("Mem region not found"); in memslot2region()
995 * VM Memory Region Flags Set
998 * vm - Virtual Machine
999 * flags - Flags to set on the memory region (e.g. KVM_MEM_LOG_DIRTY_PAGES)
1005 * Sets the flags of the memory region specified by the value of slot,
1011 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1013 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1015 region->region.flags = flags; in vm_mem_region_set_flags()
1017 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
1025 * VM Memory Region Move
1028 * vm - Virtual Machine
1029 * slot - Slot of the memory region to move
1030 * new_gpa - Starting guest physical address
1036 * Change the gpa of a memory region.
1040 struct userspace_mem_region *region; in vm_mem_region_move() local
1043 region = memslot2region(vm, slot); in vm_mem_region_move()
1045 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1047 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
1055 * VM Memory Region Delete
1058 * vm - Virtual Machine
1059 * slot - Slot of the memory region to delete
1065 * Delete a memory region.
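
A hedged sketch tying the set-flags, move and delete helpers above together; it assumes "slot" was previously populated with vm_userspace_mem_region_add():

#include "kvm_util.h"

static void exercise_slot(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	/* Toggle dirty logging via KVM_SET_USER_MEMORY_REGION. */
	vm_mem_region_set_flags(vm, slot, KVM_MEM_LOG_DIRTY_PAGES);
	vm_mem_region_set_flags(vm, slot, 0);

	/* Rebase the region at a new guest physical address. */
	vm_mem_region_move(vm, slot, new_gpa);

	/* Remove the slot (internally this sets memory_size to 0). */
	vm_mem_region_delete(vm, slot);
}
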
1092 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_exists()
1093 if (vcpu->id == vcpu_id) in vcpu_exists()
1113 TEST_ASSERT(vcpu != NULL, "Insufficient Memory"); in __vm_vcpu_add()
1115 vcpu->vm = vm; in __vm_vcpu_add()
1116 vcpu->id = vcpu_id; in __vm_vcpu_add()
1117 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); in __vm_vcpu_add()
1118 TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd)); in __vm_vcpu_add()
1120 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " in __vm_vcpu_add()
1122 vcpu_mmap_sz(), sizeof(*vcpu->run)); in __vm_vcpu_add()
1123 vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), in __vm_vcpu_add()
1124 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); in __vm_vcpu_add()
1125 TEST_ASSERT(vcpu->run != MAP_FAILED, in __vm_vcpu_add()
1128 /* Add to linked-list of VCPUs. */ in __vm_vcpu_add()
1129 list_add(&vcpu->list, &vm->vcpus); in __vm_vcpu_add()
1138 * vm - Virtual Machine
1139 * sz - Size (bytes)
1140 * vaddr_min - Minimum Virtual Address
1157 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1160 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1161 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1165 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1167 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1176 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1179 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1188 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1191 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1201 return -1; in vm_vaddr_unused_gap()
1204 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1210 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1217 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1224 * vm - Virtual Machine
1225 * sz - Size in bytes
1226 * vaddr_min - Minimum starting virtual address
1241 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in vm_vaddr_alloc()
1245 KVM_UTIL_MIN_PFN * vm->page_size, 0); in vm_vaddr_alloc()
1255 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in vm_vaddr_alloc()
1259 sparsebit_set(vm->vpages_mapped, in vm_vaddr_alloc()
1260 vaddr >> vm->page_shift); in vm_vaddr_alloc()
1270 * vm - Virtual Machine
1289 * vm - Virtual Machine
1308 * vm - Virtual Machine
1309 * vaddr - Virtual address to map
1310 * paddr - VM Physical Address
1311 * npages - The number of pages to map
1323 size_t page_size = vm->page_size; in virt_map()
1329 while (npages--) { in virt_map()
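
A hedged sketch contrasting the two helpers above: vm_vaddr_alloc() returns guest-virtual space that is already backed by physical pages and mapped, while virt_map() only installs page table entries for memory the caller obtained elsewhere. KVM_UTIL_MIN_VADDR and the identity mapping are example choices, not requirements.

#include "kvm_util.h"

static vm_vaddr_t setup_guest_buffer(struct kvm_vm *vm, vm_paddr_t extra_gpa)
{
	/* One page of backed, mapped guest memory. */
	vm_vaddr_t buf = vm_vaddr_alloc(vm, vm->page_size, KVM_UTIL_MIN_VADDR);

	/* Identity-map one additional, pre-allocated physical page. */
	virt_map(vm, extra_gpa, extra_gpa, 1);

	return buf;
}
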
1340 * vm - Virtual Machine
1341 * gpa - VM physical address
1348 * Locates the memory region containing the VM physical address given
1350 * address providing the memory to the vm physical address is returned.
1351 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1355 struct userspace_mem_region *region; in addr_gpa2hva() local
1357 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1358 if (!region) { in addr_gpa2hva()
1359 TEST_FAIL("No vm physical memory at 0x%lx", gpa); in addr_gpa2hva()
1363 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1364 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
1371 * vm - Virtual Machine
1372 * hva - Host virtual address
1379 * Locates the memory region containing the host virtual address given
1382 * region containing hva exists.
1388 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
1389 struct userspace_mem_region *region = in addr_hva2gpa() local
1392 if (hva >= region->host_mem) { in addr_hva2gpa()
1393 if (hva <= (region->host_mem in addr_hva2gpa()
1394 + region->region.memory_size - 1)) in addr_hva2gpa()
1396 region->region.guest_phys_addr in addr_hva2gpa()
1397 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
1399 node = node->rb_right; in addr_hva2gpa()
1401 node = node->rb_left; in addr_hva2gpa()
1405 return -1; in addr_hva2gpa()
1412 * vm - Virtual Machine
1413 * gpa - VM physical address
1419 * (without failing the test) if the guest memory is not shared (so
1424 * memory without mapping said memory in the guest's address space. And, for
1425 * userfaultfd-based demand paging, to do so without triggering userfaults.
1429 struct userspace_mem_region *region; in addr_gpa2alias() local
1432 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1433 if (!region) in addr_gpa2alias()
1436 if (!region->host_alias) in addr_gpa2alias()
1439 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1440 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
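
A hedged sketch of the translation helpers above: write through the host mapping, sanity-check the reverse translation, and, when the region is memfd-backed, observe the write through the alias mapping:

#include "test_util.h"
#include "kvm_util.h"

static void poke_guest_word(struct kvm_vm *vm, vm_paddr_t gpa, uint64_t val)
{
	uint64_t *hva = addr_gpa2hva(vm, gpa);
	uint64_t *alias = addr_gpa2alias(vm, gpa);	/* NULL if not shared */

	*hva = val;
	TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa, "HVA->GPA round trip mismatch");
	if (alias)
		TEST_ASSERT(*alias == val, "Alias mapping should observe the write");
}
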
1448 vm->has_irqchip = true; in vm_create_irqchip()
1457 } while (rc == -1 && errno == EINTR); in _vcpu_run()
1465 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
1466 * Assert if KVM returns an error (other than -EINTR).
1479 vcpu->run->immediate_exit = 1; in vcpu_run_complete_io()
1481 vcpu->run->immediate_exit = 0; in vcpu_run_complete_io()
1483 TEST_ASSERT(ret == -1 && errno == EINTR, in vcpu_run_complete_io()
1499 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); in vcpu_get_reg_list()
1502 reg_list->n = reg_list_n.n; in vcpu_get_reg_list()
1509 uint32_t page_size = vcpu->vm->page_size; in vcpu_map_dirty_ring()
1510 uint32_t size = vcpu->vm->dirty_ring_size; in vcpu_map_dirty_ring()
1514 if (!vcpu->dirty_gfns) { in vcpu_map_dirty_ring()
1517 addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd, in vcpu_map_dirty_ring()
1521 addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd, in vcpu_map_dirty_ring()
1525 addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, in vcpu_map_dirty_ring()
1529 vcpu->dirty_gfns = addr; in vcpu_map_dirty_ring()
1530 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn); in vcpu_map_dirty_ring()
1533 return vcpu->dirty_gfns; in vcpu_map_dirty_ring()
1565 .fd = -1, in __kvm_create_device()
1640 assert(routing->nr < KVM_MAX_IRQ_ROUTES); in kvm_gsi_routing_irqchip_add()
1642 i = routing->nr; in kvm_gsi_routing_irqchip_add()
1643 routing->entries[i].gsi = gsi; in kvm_gsi_routing_irqchip_add()
1644 routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; in kvm_gsi_routing_irqchip_add()
1645 routing->entries[i].flags = 0; in kvm_gsi_routing_irqchip_add()
1646 routing->entries[i].u.irqchip.irqchip = 0; in kvm_gsi_routing_irqchip_add()
1647 routing->entries[i].u.irqchip.pin = pin; in kvm_gsi_routing_irqchip_add()
1648 routing->nr++; in kvm_gsi_routing_irqchip_add()
1674 * vm - Virtual Machine
1675 * indent - Left margin indent amount
1678 * stream - Output FILE stream
1688 struct userspace_mem_region *region; in vm_dump() local
1691 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1692 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1693 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1695 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1698 (uint64_t) region->region.guest_phys_addr, in vm_dump()
1699 (uint64_t) region->region.memory_size, in vm_dump()
1700 region->host_mem); in vm_dump()
1702 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
1705 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1707 vm->pgd_created); in vm_dump()
1708 if (vm->pgd_created) { in vm_dump()
1715 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
1757 * exit_reason - Exit reason
1784 * vm - Virtual Machine
1785 * num - number of pages
1786 * paddr_min - Physical address minimum
1787 * memslot - Memory region to allocate page from
1802 struct userspace_mem_region *region; in vm_phy_pages_alloc() local
1807 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_pages_alloc()
1810 paddr_min, vm->page_size); in vm_phy_pages_alloc()
1812 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
1813 base = pg = paddr_min >> vm->page_shift; in vm_phy_pages_alloc()
1817 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in vm_phy_pages_alloc()
1818 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
1827 paddr_min, vm->page_size, memslot); in vm_phy_pages_alloc()
1828 fputs("---- vm dump ----\n", stderr); in vm_phy_pages_alloc()
1834 sparsebit_clear(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
1836 return base * vm->page_size; in vm_phy_pages_alloc()
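
A hedged usage sketch for the allocator above: grab four contiguous guest physical pages from memslot 0, at or above the framework's minimum PFN. The pages are only reserved in the slot's unused-page sparsebit; mapping them into the guest's virtual address space (e.g. with virt_map()) is a separate step.

#include "kvm_util.h"

static vm_paddr_t alloc_scratch_pages(struct kvm_vm *vm)
{
	/* Returns the guest physical address of the first of the four pages. */
	return vm_phy_pages_alloc(vm, 4, KVM_UTIL_MIN_PFN * vm->page_size, 0);
}
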
1857 * vm - Virtual Machine
1858 * gva - VM virtual address
1872 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_compute_max_gfn()
1880 unsigned int n = 1 << (new_page_shift - page_shift); in vm_calc_num_pages()
1883 return num_pages * (1 << (page_shift - new_page_shift)); in vm_calc_num_pages()
1890 return __builtin_ffs(getpagesize()) - 1; in getpageshift()
1919 * stats_fd - the file descriptor for the binary stats file from which to read
1920 * header - the binary stats metadata header corresponding to the given FD
1937 total_size = header->num_desc * desc_size; in read_stats_descriptors()
1939 stats_desc = calloc(header->num_desc, desc_size); in read_stats_descriptors()
1940 TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors"); in read_stats_descriptors()
1942 ret = pread(stats_fd, stats_desc, total_size, header->desc_offset); in read_stats_descriptors()
1952 * stats_fd - the file descriptor for the binary stats file from which to read
1953 * header - the binary stats metadata header corresponding to the given FD
1954 * desc - the binary stat metadata for the particular stat to be read
1955 * max_elements - the maximum number of 8-byte values to read into data
1958 * data - the buffer into which stat data should be read
1966 size_t nr_elements = min_t(ssize_t, desc->size, max_elements); in read_stat_data()
1970 TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name); in read_stat_data()
1971 TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name); in read_stat_data()
1974 header->data_offset + desc->offset); in read_stat_data()
1977 desc->name, errno, strerror(errno)); in read_stat_data()
1980 desc->name, size, ret); in read_stat_data()
1987 * vm - the VM for which the stat should be read
1988 * stat_name - the name of the stat to read
1989 * max_elements - the maximum number of 8-byte values to read into data
1992 * data - the buffer into which stat data should be read
2003 if (!vm->stats_fd) { in __vm_get_stat()
2004 vm->stats_fd = vm_get_stats_fd(vm); in __vm_get_stat()
2005 read_stats_header(vm->stats_fd, &vm->stats_header); in __vm_get_stat()
2006 vm->stats_desc = read_stats_descriptors(vm->stats_fd, in __vm_get_stat()
2007 &vm->stats_header); in __vm_get_stat()
2010 size_desc = get_stats_descriptor_size(&vm->stats_header); in __vm_get_stat()
2012 for (i = 0; i < vm->stats_header.num_desc; ++i) { in __vm_get_stat()
2013 desc = (void *)vm->stats_desc + (i * size_desc); in __vm_get_stat()
2015 if (strcmp(desc->name, stat_name)) in __vm_get_stat()
2018 read_stat_data(vm->stats_fd, &vm->stats_header, desc, in __vm_get_stat()
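
A hedged usage sketch for the binary-stats reader above: fetch one 8-byte value of a named VM stat. "remote_tlb_flush" is only an example name and is not guaranteed to exist on every architecture.

#include "kvm_util.h"

static uint64_t read_one_vm_stat(struct kvm_vm *vm, const char *name)
{
	uint64_t val = 0;

	__vm_get_stat(vm, name, &val, 1);
	return val;
}

/* e.g.: uint64_t flushes = read_one_vm_stat(vm, "remote_tlb_flush"); */
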