Lines Matching +full:memory +full:- +full:region
1 // SPDX-License-Identifier: GPL-2.0-only
28 size_t mask = size - 1; in align()
29 TEST_ASSERT(size != 0 && !(size & (size - 1)), in align()
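These two lines are the heart of the library's align() helper: assert that size is a power of two, then round up with mask arithmetic. A minimal sketch of the full helper, reconstructed from the lines above (the exact casts are assumed, not shown in this listing):

        /* Round x up to the next multiple of size (size must be a power of 2). */
        static void *align(void *x, size_t size)
        {
                size_t mask = size - 1;

                TEST_ASSERT(size != 0 && !(size & (size - 1)),
                            "size not a power of 2: %lu", size);
                return (void *)(((size_t)x + mask) & ~mask);
        }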
38 * flags - The flags to pass when opening KVM_DEV_PATH.
66 * cap - Capability
96 * vm - Virtual Machine
97 * cap - Capability
109 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
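vm_enable_cap() is a thin wrapper around KVM_ENABLE_CAP on the VM fd. A hedged usage sketch; KVM_CAP_HALT_POLL is chosen purely as an illustration of a VM-wide capability that takes an argument, and vm is an already-created VM:

        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_HALT_POLL,       /* illustrative capability */
                .args[0] = 0,                   /* args[0]: max poll time in ns */
        };

        vm_enable_cap(vm, &cap);                /* asserts if the ioctl fails */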
119 * vm - Virtual Machine
120 * vcpu_id - VCPU ID
121 * cap - Capability
137 r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap); in vcpu_enable_cap()
151 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
156 vm->kvm_fd = _open_kvm_dev_path_or_exit(perm); in vm_open()
163 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type); in vm_open()
164 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " in vm_open()
165 "rc: %i errno: %i", vm->fd, errno); in vm_open()
171 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", in vm_guest_mode_string()
172 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", in vm_guest_mode_string()
173 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", in vm_guest_mode_string()
174 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", in vm_guest_mode_string()
175 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", in vm_guest_mode_string()
176 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", in vm_guest_mode_string()
177 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", in vm_guest_mode_string()
178 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", in vm_guest_mode_string()
179 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", in vm_guest_mode_string()
207 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
208 * phy_pages - Physical memory pages
209 * perm - Permission
217 * When phy_pages is non-zero, a memory region of phy_pages physical pages
230 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in vm_create()
232 INIT_LIST_HEAD(&vm->vcpus); in vm_create()
233 vm->regions.gpa_tree = RB_ROOT; in vm_create()
234 vm->regions.hva_tree = RB_ROOT; in vm_create()
235 hash_init(vm->regions.slot_hash); in vm_create()
237 vm->mode = mode; in vm_create()
238 vm->type = 0; in vm_create()
240 vm->pa_bits = vm_guest_mode_params[mode].pa_bits; in vm_create()
241 vm->va_bits = vm_guest_mode_params[mode].va_bits; in vm_create()
242 vm->page_size = vm_guest_mode_params[mode].page_size; in vm_create()
243 vm->page_shift = vm_guest_mode_params[mode].page_shift; in vm_create()
246 switch (vm->mode) { in vm_create()
248 vm->pgtable_levels = 4; in vm_create()
251 vm->pgtable_levels = 3; in vm_create()
254 vm->pgtable_levels = 4; in vm_create()
257 vm->pgtable_levels = 3; in vm_create()
260 vm->pgtable_levels = 4; in vm_create()
263 vm->pgtable_levels = 3; in vm_create()
267 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in vm_create()
269 * Ignore KVM support for 5-level paging (vm->va_bits == 57), in vm_create()
273 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in vm_create()
275 vm->va_bits); in vm_create()
277 vm->pa_bits); in vm_create()
278 vm->pgtable_levels = 4; in vm_create()
279 vm->va_bits = 48; in vm_create()
281 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms"); in vm_create()
285 vm->pgtable_levels = 5; in vm_create()
288 vm->pgtable_levels = 5; in vm_create()
295 if (vm->pa_bits != 40) in vm_create()
296 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in vm_create()
301 /* Limit to VA-bit canonical virtual addresses. */ in vm_create()
302 vm->vpages_valid = sparsebit_alloc(); in vm_create()
303 sparsebit_set_num(vm->vpages_valid, in vm_create()
304 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_create()
305 sparsebit_set_num(vm->vpages_valid, in vm_create()
306 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in vm_create()
307 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_create()
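For the common x86 configuration (va_bits = 48, page_shift = 12), the two sparsebit ranges above work out as follows (a worked example, not lines from the file):

        lower half: page indexes 0 .. (1ULL << 47) >> 12 = 0x800000000 (2^35 pages)
        upper half: starts at ~((1ULL << 47) - 1) = 0xffff800000000000,
                    i.e. page index 0xffff800000000, and spans another 2^35 pages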
309 /* Limit physical addresses to PA-bits. */ in vm_create()
310 vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_create()
312 /* Allocate and setup memory for guest. */ in vm_create()
313 vm->vpages_mapped = sparsebit_alloc(); in vm_create()
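Putting vm_create() together: it opens /dev/kvm, issues KVM_CREATE_VM, and seeds the region trees and sparsebits above. A typical selftest call (VM_MODE_DEFAULT and DEFAULT_GUEST_PHY_PAGES are the library's defaults; a sketch, not a quote from the file):

        struct kvm_vm *vm;

        vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);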
325 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
326 * nr_vcpus - VCPU count
327 * slot0_mem_pages - Slot0 physical memory size
328 * extra_mem_pages - Non-slot0 physical memory total size
329 * num_percpu_pages - Per-cpu physical memory pages
330 * guest_code - Guest entry point
331 * vcpuids - VCPU IDs
339 * with a customized slot0 memory size, currently at least 512 pages.
341 * no real memory allocation for non-slot0 memory in this function.
352 /* Force slot0 memory size to be no smaller than DEFAULT_GUEST_PHY_PAGES */ in vm_create_with_vcpus()
356 /* The maximum page table size for a memory region will be when the in vm_create_with_vcpus()
367 "nr_vcpus = %d too large for host, max-vcpus = %d", in vm_create_with_vcpus()
407 * vm - VM that has been released before
408 * perm - Permission
413 * global state, such as the irqchip and the memory regions that are mapped
419 struct userspace_mem_region *region; in kvm_vm_restart() local
422 if (vmp->has_irqchip) in kvm_vm_restart()
425 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
426 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in kvm_vm_restart()
431 ret, errno, region->region.slot, in kvm_vm_restart()
432 region->region.flags, in kvm_vm_restart()
433 region->region.guest_phys_addr, in kvm_vm_restart()
434 region->region.memory_size); in kvm_vm_restart()
443 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
445 __func__, strerror(-ret)); in kvm_vm_get_dirty_log()
456 ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args); in kvm_vm_clear_dirty_log()
458 __func__, strerror(-ret)); in kvm_vm_clear_dirty_log()
463 return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS); in kvm_vm_reset_dirty_ring()
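These wrappers drive KVM_GET_DIRTY_LOG / KVM_CLEAR_DIRTY_LOG / KVM_RESET_DIRTY_RINGS. A sketch of fetching one slot's dirty bitmap (npages is a hypothetical page count for the slot; KVM expects one bit per page):

        uint64_t npages = 512;                  /* pages in the slot (example) */
        unsigned long *bmap;

        bmap = calloc((npages + 63) / 64, sizeof(unsigned long));
        kvm_vm_get_dirty_log(vm, 0 /* slot */, bmap);
        /* bmap now has one bit set per page dirtied since the last fetch. */
        free(bmap);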
467 * Userspace Memory Region Find
470 * vm - Virtual Machine
471 * start - Starting VM physical address
472 * end - Ending VM physical address, inclusive.
477 * Pointer to overlapping region, NULL if no such region.
479 * Searches for a region with any physical memory that overlaps with
483 * region exists.
490 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
491 struct userspace_mem_region *region = in userspace_mem_region_find() local
493 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
494 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
495 + region->region.memory_size - 1; in userspace_mem_region_find()
497 return region; in userspace_mem_region_find()
500 node = node->rb_left; in userspace_mem_region_find()
502 node = node->rb_right; in userspace_mem_region_find()
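The walk above is a standard closed-interval overlap search over the GPA-sorted rb-tree; the comparison between the lines shown is, in effect:

        /* Does [start, end] overlap [existing_start, existing_end]? */
        if (start <= existing_end && end >= existing_start)
                return region;
        if (end < existing_start)
                node = node->rb_left;           /* try lower GPAs */
        else
                node = node->rb_right;          /* try higher GPAs */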
509 * KVM Userspace Memory Region Find
512 * vm - Virtual Machine
513 * start - Starting VM physical address
514 * end - Ending VM physical address, inclusive.
519 * Pointer to overlapping region, NULL if no such region.
522 * the memslot data structure for a given range of guest physical memory.
528 struct userspace_mem_region *region; in kvm_userspace_memory_region_find() local
530 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
531 if (!region) in kvm_userspace_memory_region_find()
534 return &region->region; in kvm_userspace_memory_region_find()
541 * vm - Virtual Machine
542 * vcpuid - VCPU ID
557 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_find()
558 if (vcpu->id == vcpuid) in vcpu_find()
569 * vcpu - VCPU to remove
581 if (vcpu->dirty_gfns) { in vm_vcpu_rm()
582 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
585 vcpu->dirty_gfns = NULL; in vm_vcpu_rm()
588 ret = munmap(vcpu->state, vcpu_mmap_sz()); in vm_vcpu_rm()
591 ret = close(vcpu->fd); in vm_vcpu_rm()
595 list_del(&vcpu->list); in vm_vcpu_rm()
604 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list) in kvm_vm_release()
607 ret = close(vmp->fd); in kvm_vm_release()
609 " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno); in kvm_vm_release()
611 ret = close(vmp->kvm_fd); in kvm_vm_release()
613 " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno); in kvm_vm_release()
617 struct userspace_mem_region *region, in __vm_mem_region_delete() argument
623 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
624 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
625 hash_del(&region->slot_node); in __vm_mem_region_delete()
628 region->region.memory_size = 0; in __vm_mem_region_delete()
629 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
633 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
634 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
637 free(region); in __vm_mem_region_delete()
647 struct userspace_mem_region *region; in kvm_vm_free() local
653 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
654 __vm_mem_region_delete(vmp, region, false); in kvm_vm_free()
657 sparsebit_free(&vmp->vpages_valid); in kvm_vm_free()
658 sparsebit_free(&vmp->vpages_mapped); in kvm_vm_free()
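Note the delete idiom visible above in __vm_mem_region_delete(): a memslot is removed by re-issuing KVM_SET_USER_MEMORY_REGION with memory_size = 0. The same works against the raw ioctl (vm_fd is a hypothetical VM file descriptor):

        struct kvm_userspace_memory_region del = {
                .slot = 1,                      /* slot to remove (example) */
                .memory_size = 0,               /* zero size == delete */
        };

        int r = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &del);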
667 * Memory Compare, host virtual to guest virtual
670 * hva - Starting host virtual address
671 * vm - Virtual Machine
672 * gva - Starting guest virtual address
673 * len - number of bytes to compare
710 amt = len - offset; in kvm_memcmp_hva_gva()
711 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
712 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
713 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
714 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
716 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
717 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
730 * No mismatch found. Let the caller know the two memory in kvm_memcmp_hva_gva()
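The clamping above keeps each comparison chunk inside a single page on both the HVA and GVA side. Worked example with page_size = 4096: if ptr1 % 4096 == 4000 and len - offset == 500, amt is cut to 4096 - 4000 = 96 bytes, so the chunk cannot straddle a page boundary.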
737 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
741 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { in vm_userspace_mem_region_gpa_insert()
746 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
747 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
748 cur = &(*cur)->rb_left; in vm_userspace_mem_region_gpa_insert()
750 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
751 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
752 "Duplicate GPA in region tree"); in vm_userspace_mem_region_gpa_insert()
754 cur = &(*cur)->rb_right; in vm_userspace_mem_region_gpa_insert()
758 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
759 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
763 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
767 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { in vm_userspace_mem_region_hva_insert()
772 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
773 cur = &(*cur)->rb_left; in vm_userspace_mem_region_hva_insert()
775 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
776 cregion->host_mem, in vm_userspace_mem_region_hva_insert()
777 "Duplicate HVA in region tree"); in vm_userspace_mem_region_hva_insert()
779 cur = &(*cur)->rb_right; in vm_userspace_mem_region_hva_insert()
783 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
784 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
788 * VM Userspace Memory Region Add
791 * vm - Virtual Machine
792 * src_type - Storage source for this region.
793 * NULL to use anonymous memory.
794 * guest_paddr - Starting guest physical address
795 * slot - KVM region slot
796 * npages - Number of physical pages
797 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
803 * Allocates a memory area of the number of pages specified by npages
805 * given by guest_paddr. The region is created with a KVM region slot
807 * region is created with the flags given by flags.
815 struct userspace_mem_region *region; in vm_userspace_mem_region_add() local
819 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_userspace_mem_region_add()
821 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_userspace_mem_region_add()
823 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
825 " guest_paddr: 0x%lx vm->page_size: 0x%x", in vm_userspace_mem_region_add()
826 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
827 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
828 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
831 " vm->max_gfn: 0x%lx vm->page_size: 0x%x", in vm_userspace_mem_region_add()
832 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
835 * Confirm a mem region with an overlapping address doesn't in vm_userspace_mem_region_add()
838 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_userspace_mem_region_add()
839 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
840 if (region != NULL) in vm_userspace_mem_region_add()
846 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
847 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
848 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
850 /* Confirm no region with the requested slot already exists. */ in vm_userspace_mem_region_add()
851 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
853 if (region->region.slot != slot) in vm_userspace_mem_region_add()
856 TEST_FAIL("A mem region with the requested slot " in vm_userspace_mem_region_add()
861 region->region.slot, in vm_userspace_mem_region_add()
862 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
863 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
866 /* Allocate and initialize new mem region structure. */ in vm_userspace_mem_region_add()
867 region = calloc(1, sizeof(*region)); in vm_userspace_mem_region_add()
868 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_userspace_mem_region_add()
869 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
881 /* Add enough memory to align up if necessary */ in vm_userspace_mem_region_add()
883 region->mmap_size += alignment; in vm_userspace_mem_region_add()
885 region->fd = -1; in vm_userspace_mem_region_add()
892 region->fd = memfd_create("kvm_selftest", memfd_flags); in vm_userspace_mem_region_add()
893 TEST_ASSERT(region->fd != -1, in vm_userspace_mem_region_add()
896 ret = ftruncate(region->fd, region->mmap_size); in vm_userspace_mem_region_add()
899 ret = fallocate(region->fd, in vm_userspace_mem_region_add()
901 region->mmap_size); in vm_userspace_mem_region_add()
905 region->mmap_start = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
907 vm_mem_backing_src_alias(src_type)->flag, in vm_userspace_mem_region_add()
908 region->fd, 0); in vm_userspace_mem_region_add()
909 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_userspace_mem_region_add()
911 region->mmap_start, errno); in vm_userspace_mem_region_add()
914 region->host_mem = align(region->mmap_start, alignment); in vm_userspace_mem_region_add()
919 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
922 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
923 vm_mem_backing_src_alias(src_type)->name); in vm_userspace_mem_region_add()
926 region->unused_phy_pages = sparsebit_alloc(); in vm_userspace_mem_region_add()
927 sparsebit_set_num(region->unused_phy_pages, in vm_userspace_mem_region_add()
928 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
929 region->region.slot = slot; in vm_userspace_mem_region_add()
930 region->region.flags = flags; in vm_userspace_mem_region_add()
931 region->region.guest_phys_addr = guest_paddr; in vm_userspace_mem_region_add()
932 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
933 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_userspace_mem_region_add()
934 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
940 guest_paddr, (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
943 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
944 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
945 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
947 /* If shared memory, create an alias. */ in vm_userspace_mem_region_add()
948 if (region->fd >= 0) { in vm_userspace_mem_region_add()
949 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
951 vm_mem_backing_src_alias(src_type)->flag, in vm_userspace_mem_region_add()
952 region->fd, 0); in vm_userspace_mem_region_add()
953 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_userspace_mem_region_add()
957 region->host_alias = align(region->mmap_alias, alignment); in vm_userspace_mem_region_add()
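A typical call, matching the documented arguments (GPA, slot, and size are illustrative):

        /* 512 anonymous pages at GPA 0x10000000 in slot 1, dirty logging on. */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    0x10000000 /* guest_paddr */,
                                    1          /* slot */,
                                    512        /* npages */,
                                    KVM_MEM_LOG_DIRTY_PAGES);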
962 * Memslot to region
965 * vm - Virtual Machine
966 * memslot - KVM memory slot ID
971 * Pointer to the memory region structure that describes the memory region
972 * using kvm memory slot ID given by memslot. TEST_ASSERT failure
973 * on error (e.g. currently no memory region using memslot as a KVM
974 * memory slot ID).
979 struct userspace_mem_region *region; in memslot2region() local
981 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
983 if (region->region.slot == memslot) in memslot2region()
984 return region; in memslot2region()
986 fprintf(stderr, "No mem region with the requested slot found,\n" in memslot2region()
988 fputs("---- vm dump ----\n", stderr); in memslot2region()
990 TEST_FAIL("Mem region not found"); in memslot2region()
995 * VM Memory Region Flags Set
998 * vm - Virtual Machine
999 * flags - Flags to set for the memory region
1005 * Sets the flags of the memory region specified by the value of slot,
1011 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1013 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1015 region->region.flags = flags; in vm_mem_region_set_flags()
1017 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
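Usage sketch (slot number illustrative): toggle dirty logging on an existing slot without recreating it:

        vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES);
        /* ...collect dirty logs... then turn logging back off: */
        vm_mem_region_set_flags(vm, 1, 0);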
1025 * VM Memory Region Move
1028 * vm - Virtual Machine
1029 * slot - Slot of the memory region to move
1030 * new_gpa - Starting guest physical address
1036 * Change the gpa of a memory region.
1040 struct userspace_mem_region *region; in vm_mem_region_move() local
1043 region = memslot2region(vm, slot); in vm_mem_region_move()
1045 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1047 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
1055 * VM Memory Region Delete
1058 * vm - Virtual Machine
1059 * slot - Slot of the memory region to delete
1065 * Delete a memory region.
1105 * vm - Virtual Machine
1106 * vcpuid - VCPU ID
1126 vcpuid, vcpu->id, vcpu->state); in vm_vcpu_add()
1130 TEST_ASSERT(vcpu != NULL, "Insufficient Memory"); in vm_vcpu_add()
1131 vcpu->id = vcpuid; in vm_vcpu_add()
1132 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid); in vm_vcpu_add()
1133 TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i", in vm_vcpu_add()
1134 vcpu->fd, errno); in vm_vcpu_add()
1136 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size " in vm_vcpu_add()
1138 vcpu_mmap_sz(), sizeof(*vcpu->state)); in vm_vcpu_add()
1139 vcpu->state = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), in vm_vcpu_add()
1140 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); in vm_vcpu_add()
1141 TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, " in vm_vcpu_add()
1144 /* Add to linked-list of VCPUs. */ in vm_vcpu_add()
1145 list_add(&vcpu->list, &vm->vcpus); in vm_vcpu_add()
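After vm_vcpu_add(), the mmap'ed kvm_run page is reachable via vcpu_state() (further down in this listing) and is where run results land. A sketch:

        struct kvm_run *run;

        vm_vcpu_add(vm, 0 /* vcpuid */);
        run = vcpu_state(vm, 0);
        /* run->exit_reason etc. become meaningful after the first KVM_RUN. */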
1152 * vm - Virtual Machine
1153 * sz - Size (bytes)
1154 * vaddr_min - Minimum Virtual Address
1171 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1174 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1175 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1179 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1181 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1190 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1193 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1202 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1205 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1215 return -1; in vm_vaddr_unused_gap()
1218 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1224 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1231 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1238 * vm - Virtual Machine
1239 * sz - Size in bytes
1240 * vaddr_min - Minimum starting virtual address
1241 * data_memslot - Memory region slot for data pages
1242 * pgd_memslot - Memory region slot for new virtual translation tables
1257 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in vm_vaddr_alloc()
1261 KVM_UTIL_MIN_PFN * vm->page_size, 0); in vm_vaddr_alloc()
1271 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in vm_vaddr_alloc()
1275 sparsebit_set(vm->vpages_mapped, in vm_vaddr_alloc()
1276 vaddr >> vm->page_shift); in vm_vaddr_alloc()
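A sketch matching the argument list documented above (note: later versions of this library drop the two memslot parameters; memslot 0 is the conventional choice for both):

        vm_vaddr_t gva;

        gva = vm_vaddr_alloc(vm, 4096 /* sz */, 0x10000 /* vaddr_min */,
                             0 /* data_memslot */, 0 /* pgd_memslot */);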
1286 * vm - Virtual Machine
1305 * vm - Virtual Machine
1324 * vm - Virtual Machine
1325 * vaddr - Virtual address to map
1326 * paddr - VM Physical Address
1327 * npages - The number of pages to map
1328 * pgd_memslot - Memory region slot for new virtual translation tables
1340 size_t page_size = vm->page_size; in virt_map()
1346 while (npages--) { in virt_map()
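Usage sketch per the parameters documented above (an identity mapping; addresses illustrative):

        /* Map one guest page at GVA 0x400000 onto GPA 0x400000. */
        virt_map(vm, 0x400000 /* vaddr */, 0x400000 /* paddr */,
                 1 /* npages */, 0 /* pgd_memslot */);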
1357 * vm - Virtual Machine
1358 * gpa - VM physical address
1365 * Locates the memory region containing the VM physical address given
1367 * address providing the memory to the vm physical address is returned.
1368 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1372 struct userspace_mem_region *region; in addr_gpa2hva() local
1374 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1375 if (!region) { in addr_gpa2hva()
1376 TEST_FAIL("No vm physical memory at 0x%lx", gpa); in addr_gpa2hva()
1380 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1381 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
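Because the returned pointer aliases guest RAM, the host can read or write guest memory directly; a sketch:

        uint64_t *p = addr_gpa2hva(vm, 0x10000000 /* gpa, example */);

        *p = 0xdeadbeef;        /* guest sees this at GPA 0x10000000 */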
1388 * vm - Virtual Machine
1389 * hva - Host virtual address
1396 * Locates the memory region containing the host virtual address given
1399 * region containing hva exists.
1405 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
1406 struct userspace_mem_region *region = in addr_hva2gpa() local
1409 if (hva >= region->host_mem) { in addr_hva2gpa()
1410 if (hva <= (region->host_mem in addr_hva2gpa()
1411 + region->region.memory_size - 1)) in addr_hva2gpa()
1413 region->region.guest_phys_addr in addr_hva2gpa()
1414 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
1416 node = node->rb_right; in addr_hva2gpa()
1418 node = node->rb_left; in addr_hva2gpa()
1422 return -1; in addr_hva2gpa()
1429 * vm - Virtual Machine
1430 * gpa - VM physical address
1436 * (without failing the test) if the guest memory is not shared (so
1439 * When vm_create() and related functions are called with a shared memory
1441 * underlying guest memory. This allows the host to manipulate guest memory
1442 * without mapping that memory in the guest's address space. And, for
1443 * userfaultfd-based demand paging, we can do so without triggering userfaults.
1447 struct userspace_mem_region *region; in addr_gpa2alias() local
1450 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1451 if (!region) in addr_gpa2alias()
1454 if (!region->host_alias) in addr_gpa2alias()
1457 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1458 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
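Sketch of the demand-paging use mentioned above: write through the alias so the guest-visible mapping is never touched (and a userfaultfd armed on it never fires):

        uint64_t *alias = addr_gpa2alias(vm, 0x10000000 /* gpa, example */);

        if (alias)              /* NULL when the backing is not shared */
                *alias = 0xdeadbeef;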
1465 * vm - Virtual Machine
1477 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0); in vm_create_irqchip()
1481 vm->has_irqchip = true; in vm_create_irqchip()
1488 * vm - Virtual Machine
1489 * vcpuid - VCPU ID
1504 return vcpu->state; in vcpu_state()
1511 * vm - Virtual Machine
1512 * vcpuid - VCPU ID
1535 rc = ioctl(vcpu->fd, KVM_RUN, NULL); in _vcpu_run()
1536 } while (rc == -1 && errno == EINTR); in _vcpu_run()
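_vcpu_run() retries KVM_RUN across EINTR so signal delivery doesn't fail a test. The usual caller pattern then dispatches on the shared kvm_run page; a sketch:

        struct kvm_run *run = vcpu_state(vm, 0 /* vcpuid */);

        vcpu_run(vm, 0);        /* asserting wrapper over _vcpu_run() */
        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                    "Unexpected exit: %u (%s)", run->exit_reason,
                    exit_reason_str(run->exit_reason));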
1549 return vcpu->fd; in vcpu_get_fd()
1559 vcpu->state->immediate_exit = 1; in vcpu_run_complete_io()
1560 ret = ioctl(vcpu->fd, KVM_RUN, NULL); in vcpu_run_complete_io()
1561 vcpu->state->immediate_exit = 0; in vcpu_run_complete_io()
1563 TEST_ASSERT(ret == -1 && errno == EINTR, in vcpu_run_complete_io()
1572 int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug); in vcpu_set_guest_debug()
1581 * vm - Virtual Machine
1582 * vcpuid - VCPU ID
1583 * mp_state - mp_state to be set
1600 ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state); in vcpu_set_mp_state()
1609 * vm - Virtual Machine
1610 * vcpuid - VCPU ID
1627 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); in vcpu_get_reg_list()
1629 reg_list->n = reg_list_n.n; in vcpu_get_reg_list()
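The E2BIG probe above is the canonical KVM_GET_REG_LIST handshake: call once with n = 0 to learn the count, then allocate and call again. Against the raw ioctl (vcpu_fd hypothetical), that is:

        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;

        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);       /* fails with E2BIG, */
                                                        /* but fills probe.n */
        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
        list->n = probe.n;
        ioctl(vcpu_fd, KVM_GET_REG_LIST, list);         /* now succeeds */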
1638 * vm - Virtual Machine
1639 * vcpuid - VCPU ID
1642 * regs - current state of VCPU regs
1656 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs); in vcpu_regs_get()
1665 * vm - Virtual Machine
1666 * vcpuid - VCPU ID
1667 * regs - Values to set VCPU regs to
1683 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs); in vcpu_regs_set()
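Get/modify/set is the standard way to poke guest registers from the host; for instance on x86 (field names per the x86 kvm_regs UAPI):

        struct kvm_regs regs;

        vcpu_regs_get(vm, 0 /* vcpuid */, &regs);
        regs.rip += 2;          /* e.g. skip a 2-byte instruction */
        vcpu_regs_set(vm, 0, &regs);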
1697 ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events); in vcpu_events_get()
1710 ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events); in vcpu_events_set()
1725 ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state); in vcpu_nested_state_get()
1739 ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state); in vcpu_nested_state_set()
1754 * vm - Virtual Machine
1755 * vcpuid - VCPU ID
1758 * sregs - current state of VCPU system regs
1772 ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs); in vcpu_sregs_get()
1781 * vm - Virtual Machine
1782 * vcpuid - VCPU ID
1783 * sregs - Values to set VCPU system regs to
1805 return ioctl(vcpu->fd, KVM_SET_SREGS, sregs); in _vcpu_sregs_set()
1848 * vm - Virtual Machine
1849 * vcpuid - VCPU ID
1850 * cmd - Ioctl number
1851 * arg - Argument to pass to the ioctl
1875 ret = ioctl(vcpu->fd, cmd, arg); in _vcpu_ioctl()
1883 uint32_t size = vm->dirty_ring_size; in vcpu_map_dirty_ring()
1891 if (!vcpu->dirty_gfns) { in vcpu_map_dirty_ring()
1895 MAP_PRIVATE, vcpu->fd, in vcpu_map_dirty_ring()
1896 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); in vcpu_map_dirty_ring()
1900 MAP_PRIVATE, vcpu->fd, in vcpu_map_dirty_ring()
1901 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); in vcpu_map_dirty_ring()
1905 MAP_SHARED, vcpu->fd, in vcpu_map_dirty_ring()
1906 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET); in vcpu_map_dirty_ring()
1909 vcpu->dirty_gfns = addr; in vcpu_map_dirty_ring()
1910 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn); in vcpu_map_dirty_ring()
1913 return vcpu->dirty_gfns; in vcpu_map_dirty_ring()
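Once mapped, entries are harvested by checking each kvm_dirty_gfn for the DIRTY flag, marking it RESET, and finally resetting the rings. A simplified sketch (real consumers track a fetch index instead of scanning the whole ring; ring_size is the byte size passed to vm_enable_dirty_ring()):

        struct kvm_dirty_gfn *gfns = vcpu_map_dirty_ring(vm, 0 /* vcpuid */);
        uint32_t i, count = ring_size / sizeof(struct kvm_dirty_gfn);

        for (i = 0; i < count; i++) {
                if (!(gfns[i].flags & KVM_DIRTY_GFN_F_DIRTY))
                        continue;
                /* gfns[i].slot / gfns[i].offset name the dirtied page. */
                gfns[i].flags = KVM_DIRTY_GFN_F_RESET;
        }
        kvm_vm_reset_dirty_ring(vm);    /* hand reset entries back to KVM */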
1920 * vm - Virtual Machine
1921 * cmd - Ioctl number
1922 * arg - Argument to pass to the ioctl
1939 return ioctl(vm->fd, cmd, arg); in _vm_ioctl()
1946 * vm - Virtual Machine
1947 * cmd - Ioctl number
1948 * arg - Argument to pass to the ioctl
1958 ret = ioctl(vm->kvm_fd, cmd, arg); in kvm_ioctl()
1965 return ioctl(vm->kvm_fd, cmd, arg); in _kvm_ioctl()
1997 create_dev.fd = -1; in _kvm_create_device()
2047 * vm - Virtual Machine
2048 * indent - Left margin indent amount
2051 * stream - Output FILE stream
2061 struct userspace_mem_region *region; in vm_dump() local
2064 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
2065 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
2066 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
2068 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
2071 (uint64_t) region->region.guest_phys_addr, in vm_dump()
2072 (uint64_t) region->region.memory_size, in vm_dump()
2073 region->host_mem); in vm_dump()
2075 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
2078 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
2080 vm->pgd_created); in vm_dump()
2081 if (vm->pgd_created) { in vm_dump()
2087 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
2088 vcpu_dump(stream, vm, vcpu->id, indent + 2); in vm_dump()
2129 * exit_reason - Exit reason
2156 * vm - Virtual Machine
2157 * num - number of pages
2158 * paddr_min - Physical address minimum
2159 * memslot - Memory region to allocate page from
2174 struct userspace_mem_region *region; in vm_phy_pages_alloc() local
2179 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_pages_alloc()
2182 paddr_min, vm->page_size); in vm_phy_pages_alloc()
2184 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
2185 base = pg = paddr_min >> vm->page_shift; in vm_phy_pages_alloc()
2189 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in vm_phy_pages_alloc()
2190 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
2199 paddr_min, vm->page_size, memslot); in vm_phy_pages_alloc()
2200 fputs("---- vm dump ----\n", stderr); in vm_phy_pages_alloc()
2206 sparsebit_clear(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
2208 return base * vm->page_size; in vm_phy_pages_alloc()
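Allocation sketch per the parameters documented above (paddr_min must be page aligned, as the assert enforces):

        vm_paddr_t paddr;

        /* One page, at or above GPA 0x1000, carved out of memslot 0. */
        paddr = vm_phy_pages_alloc(vm, 1 /* num */, 0x1000 /* paddr_min */,
                                   0 /* memslot */);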
2229 * vm - Virtual Machine
2230 * gva - VM virtual address
2246 * vm - Virtual Machine
2261 /* Ensure that the KVM vendor-specific module is loaded. */ in vm_is_unrestricted_guest()
2277 return vm->page_size; in vm_get_page_size()
2282 return vm->page_shift; in vm_get_page_shift()
2287 return vm->max_gfn; in vm_get_max_gfn()
2292 return vm->fd; in vm_get_fd()
2300 unsigned int n = 1 << (new_page_shift - page_shift); in vm_calc_num_pages()
2303 return num_pages * (1 << (page_shift - new_page_shift)); in vm_calc_num_pages()
2310 return __builtin_ffs(getpagesize()) - 1; in getpageshift()
2337 return ioctl(vm->fd, KVM_GET_STATS_FD, NULL); in vm_get_stats_fd()
2344 return ioctl(vcpu->fd, KVM_GET_STATS_FD, NULL); in vcpu_get_stats_fd()