Lines matching refs:vm (cross-reference of the KVM selftests library, tools/testing/selftests/kvm/lib/kvm_util.c; each entry gives the source line number, the matching line, and the enclosing function, with "argument"/"local" flagging how vm is used on that line)

77 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)  in vm_enable_cap()  argument
81 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
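
A minimal usage sketch for the wrapper above. The capability is illustrative: KVM_CAP_SPLIT_IRQCHIP is a VM-scoped x86 cap whose args[0] reserves routes for a userspace IOAPIC; per the listing, vm_enable_cap() issues KVM_ENABLE_CAP on vm->fd and returns the result (asserting success first).

    struct kvm_enable_cap cap = {
            .cap = KVM_CAP_SPLIT_IRQCHIP,
            .args[0] = 24,           /* routes reserved for the userspace IOAPIC */
    };
    vm_enable_cap(vm, &cap);         /* TEST_ASSERTs on ioctl failure */
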
88 static void vm_open(struct kvm_vm *vm, int perm) in vm_open() argument
90 vm->kvm_fd = open(KVM_DEV_PATH, perm); in vm_open()
91 if (vm->kvm_fd < 0) in vm_open()
99 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type); in vm_open()
100 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " in vm_open()
101 "rc: %i errno: %i", vm->fd, errno); in vm_open()
137 struct kvm_vm *vm; in _vm_create() local
141 vm = calloc(1, sizeof(*vm)); in _vm_create()
142 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in _vm_create()
144 vm->mode = mode; in _vm_create()
145 vm->type = 0; in _vm_create()
148 switch (vm->mode) { in _vm_create()
150 vm->pgtable_levels = 4; in _vm_create()
151 vm->pa_bits = 52; in _vm_create()
152 vm->va_bits = 48; in _vm_create()
153 vm->page_size = 0x1000; in _vm_create()
154 vm->page_shift = 12; in _vm_create()
157 vm->pgtable_levels = 3; in _vm_create()
158 vm->pa_bits = 52; in _vm_create()
159 vm->va_bits = 48; in _vm_create()
160 vm->page_size = 0x10000; in _vm_create()
161 vm->page_shift = 16; in _vm_create()
164 vm->pgtable_levels = 4; in _vm_create()
165 vm->pa_bits = 48; in _vm_create()
166 vm->va_bits = 48; in _vm_create()
167 vm->page_size = 0x1000; in _vm_create()
168 vm->page_shift = 12; in _vm_create()
171 vm->pgtable_levels = 3; in _vm_create()
172 vm->pa_bits = 48; in _vm_create()
173 vm->va_bits = 48; in _vm_create()
174 vm->page_size = 0x10000; in _vm_create()
175 vm->page_shift = 16; in _vm_create()
178 vm->pgtable_levels = 4; in _vm_create()
179 vm->pa_bits = 40; in _vm_create()
180 vm->va_bits = 48; in _vm_create()
181 vm->page_size = 0x1000; in _vm_create()
182 vm->page_shift = 12; in _vm_create()
185 vm->pgtable_levels = 3; in _vm_create()
186 vm->pa_bits = 40; in _vm_create()
187 vm->va_bits = 48; in _vm_create()
188 vm->page_size = 0x10000; in _vm_create()
189 vm->page_shift = 16; in _vm_create()
193 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in _vm_create()
194 TEST_ASSERT(vm->va_bits == 48, "Linear address width " in _vm_create()
195 "(%d bits) not supported", vm->va_bits); in _vm_create()
196 vm->pgtable_levels = 4; in _vm_create()
197 vm->page_size = 0x1000; in _vm_create()
198 vm->page_shift = 12; in _vm_create()
200 vm->pa_bits); in _vm_create()
211 if (vm->pa_bits != 40) in _vm_create()
212 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in _vm_create()
215 vm_open(vm, perm); in _vm_create()
218 vm->vpages_valid = sparsebit_alloc(); in _vm_create()
219 sparsebit_set_num(vm->vpages_valid, in _vm_create()
220 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in _vm_create()
221 sparsebit_set_num(vm->vpages_valid, in _vm_create()
222 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in _vm_create()
223 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in _vm_create()
226 vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in _vm_create()
229 vm->vpages_mapped = sparsebit_alloc(); in _vm_create()
231 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, in _vm_create()
234 return vm; in _vm_create()
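
The switch above is really a geometry table: each vm_guest_mode pins pgtable_levels, pa_bits/va_bits, and the page size/shift (52-, 48- and 40-bit PA variants with 4K or 64K pages), while the x86 default probes the host via kvm_get_cpu_address_width() and insists on 48 bits of linear address. Tests normally reach this through the public constructor; a sketch, assuming the vm_create() wrapper and the VM_MODE_DEFAULT / DEFAULT_GUEST_PHY_PAGES constants this library conventionally exports:

    /* Default-mode VM; perm is handed straight to open("/dev/kvm", perm). */
    struct kvm_vm *vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
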
277 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log() argument
282 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
287 void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log() argument
295 ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args); in kvm_vm_clear_dirty_log()
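
Both dirty-log helpers forward to their vm ioctl and assert success. A sketch of the usual fetch-then-rearm cycle, assuming the truncated kvm_vm_clear_dirty_log() signature above ends with (first_page, num_pages), matching the KVM_CLEAR_DIRTY_LOG UAPI, and that slot/npages describe an existing memslot:

    /* One bit per guest page in the slot; the caller owns the buffer. */
    unsigned long *bitmap = calloc(1, (npages + 7) / 8);
    TEST_ASSERT(bitmap, "calloc failed");

    kvm_vm_get_dirty_log(vm, slot, bitmap);
    /* ... scan bitmap for pages the guest dirtied ... */
    kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, npages);
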
320 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
324 for (region = vm->userspace_mem_region_head; region; in userspace_mem_region_find()
353 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, in kvm_userspace_memory_region_find() argument
358 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
381 struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_find() argument
385 for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) { in vcpu_find()
406 static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid) in vm_vcpu_rm() argument
408 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vm_vcpu_rm()
423 vm->vcpu_head = vcpu->next; in vm_vcpu_rm()
506 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) in kvm_memcmp_hva_gva() argument
521 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); in kvm_memcmp_hva_gva()
528 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
529 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
530 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
531 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
533 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
534 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
575 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
582 size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size; in vm_userspace_mem_region_add()
585 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
588 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
589 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
590 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
594 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
601 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
608 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
613 for (region = vm->userspace_mem_region_head; region; in vm_userspace_mem_region_add()
631 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
661 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
667 region->host_mem, npages * vm->page_size, src_type); in vm_userspace_mem_region_add()
672 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
676 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
678 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
687 if (vm->userspace_mem_region_head) in vm_userspace_mem_region_add()
688 vm->userspace_mem_region_head->prev = region; in vm_userspace_mem_region_add()
689 region->next = vm->userspace_mem_region_head; in vm_userspace_mem_region_add()
690 vm->userspace_mem_region_head = region; in vm_userspace_mem_region_add()
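
vm_userspace_mem_region_add() checks alignment against vm->page_size, bounds against vm->max_gfn, overlap against every existing region and slot, mmaps the backing (madvising for THP when asked), and only then registers the slot. That final step is plain UAPI; a raw sketch of it:

    struct kvm_userspace_memory_region region = {
            .slot            = 1,
            .flags           = 0,                   /* or KVM_MEM_LOG_DIRTY_PAGES */
            .guest_phys_addr = guest_paddr,         /* page-aligned, per the asserts */
            .memory_size     = npages * page_size,
            .userspace_addr  = (uintptr_t)host_mem, /* from mmap() */
    };
    int ret = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno: %i", errno);
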
709 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
713 for (region = vm->userspace_mem_region_head; region; in memslot2region()
722 vm_dump(stderr, vm, 2); in memslot2region()
743 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
748 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
752 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
804 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid) in vm_vcpu_add() argument
809 vcpu = vcpu_find(vm, vcpuid); in vm_vcpu_add()
821 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid); in vm_vcpu_add()
834 if (vm->vcpu_head) in vm_vcpu_add()
835 vm->vcpu_head->prev = vcpu; in vm_vcpu_add()
836 vcpu->next = vm->vcpu_head; in vm_vcpu_add()
837 vm->vcpu_head = vcpu; in vm_vcpu_add()
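
vm_vcpu_add() pairs KVM_CREATE_VCPU with an mmap of the shared kvm_run page (that mapping is what vcpu_state() later returns) and links the vcpu into the vm's list. The raw equivalent:

    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);            /* vcpuid 0 */
    int run_sz  = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); /* size of run area */
    struct kvm_run *run = mmap(NULL, run_sz, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);
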
860 static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
863 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
866 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
867 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
871 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
873 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
882 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
885 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
894 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
897 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
911 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
917 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
924 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
948 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, in vm_vaddr_alloc() argument
951 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in vm_vaddr_alloc()
953 virt_pgd_alloc(vm, pgd_memslot); in vm_vaddr_alloc()
959 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in vm_vaddr_alloc()
963 pages--, vaddr += vm->page_size) { in vm_vaddr_alloc()
966 paddr = vm_phy_page_alloc(vm, in vm_vaddr_alloc()
967 KVM_UTIL_MIN_PFN * vm->page_size, data_memslot); in vm_vaddr_alloc()
969 virt_pg_map(vm, vaddr, paddr, pgd_memslot); in vm_vaddr_alloc()
971 sparsebit_set(vm->vpages_mapped, in vm_vaddr_alloc()
972 vaddr >> vm->page_shift); in vm_vaddr_alloc()
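
vm_vaddr_alloc() is the guest-virtual allocator: it lazily creates the page tables (virt_pgd_alloc), finds a hole with vm_vaddr_unused_gap(), then backs and maps one page at a time, recording each in vpages_mapped. A hedged call site, assuming the library's customary KVM_UTIL_MIN_VADDR floor and memslot 0 for both data and page tables:

    /* One page of fresh, mapped guest-virtual memory. */
    vm_vaddr_t gva = vm_vaddr_alloc(vm, vm_get_page_size(vm),
                                    KVM_UTIL_MIN_VADDR, 0, 0);
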
995 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
998 size_t page_size = vm->page_size; in virt_map()
1005 virt_pg_map(vm, vaddr, paddr, pgd_memslot); in virt_map()
1028 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1031 for (region = vm->userspace_mem_region_head; region; in addr_gpa2hva()
1061 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1064 for (region = vm->userspace_mem_region_head; region; in addr_hva2gpa()
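
addr_gpa2hva() and addr_hva2gpa() walk the same region list in opposite directions; together with addr_gva2hva() further down, they let host code touch guest memory directly, no vmexit required. A sketch, assuming gpa falls inside a registered region:

    /* Fill a page of guest memory from the host side. */
    void *hva = addr_gpa2hva(vm, gpa);
    memset(hva, 0xaa, vm_get_page_size(vm));
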
1091 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1095 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0); in vm_create_irqchip()
1099 vm->has_irqchip = true; in vm_create_irqchip()
1117 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_state() argument
1119 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_state()
1139 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_run() argument
1141 int ret = _vcpu_run(vm, vcpuid); in vcpu_run()
1146 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in _vcpu_run() argument
1148 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_run()
1158 void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_run_complete_io() argument
1160 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_run_complete_io()
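
vcpu_run() asserts that _vcpu_run()'s KVM_RUN ioctl returned success; the reason for the exit then sits in the mmapped kvm_run area that vcpu_state() hands back. The canonical test loop, assuming a test-defined VCPU_ID:

    for (;;) {
            vcpu_run(vm, VCPU_ID);
            struct kvm_run *run = vcpu_state(vm, VCPU_ID);

            if (run->exit_reason == KVM_EXIT_IO)
                    continue;       /* e.g. guest ucall traffic; handle and resume */
            if (run->exit_reason == KVM_EXIT_HLT)
                    break;          /* guest is done */
            TEST_ASSERT(false, "Unexpected exit reason: %u", run->exit_reason);
    }
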
1189 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_set_mp_state() argument
1192 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_set_mp_state()
1217 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) in vcpu_regs_get() argument
1219 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_get()
1244 void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) in vcpu_regs_set() argument
1246 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_set()
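
Register access is read-modify-write. An x86_64-flavored sketch (rip and rdi are x86 kvm_regs fields; guest_code is a hypothetical guest entry point):

    struct kvm_regs regs;
    vcpu_regs_get(vm, VCPU_ID, &regs);
    regs.rip = (uint64_t)guest_code;   /* point the guest at its entry */
    regs.rdi = 42;                     /* first argument per the SysV ABI */
    vcpu_regs_set(vm, VCPU_ID, &regs);
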
1257 void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_get() argument
1260 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_get()
1270 void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_set() argument
1273 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_set()
1285 void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_nested_state_get() argument
1288 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_nested_state_get()
1299 int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_nested_state_set() argument
1302 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_nested_state_set()
1333 void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in vcpu_sregs_get() argument
1335 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_sregs_get()
1360 void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in vcpu_sregs_set() argument
1362 int ret = _vcpu_sregs_set(vm, vcpuid, sregs); in vcpu_sregs_set()
1367 int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) in _vcpu_sregs_set() argument
1369 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_sregs_set()
1389 void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_ioctl() argument
1394 ret = _vcpu_ioctl(vm, vcpuid, cmd, arg); in vcpu_ioctl()
1399 int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, in _vcpu_ioctl() argument
1402 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_ioctl()
1424 void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) in vm_ioctl() argument
1428 ret = ioctl(vm->fd, cmd, arg); in vm_ioctl()
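
vcpu_ioctl()/vm_ioctl() are the catch-alls: issue any ioctl on the right fd and assert success (the leading-underscore variants return the raw result instead). Handy for ioctls without a dedicated helper, e.g. the vm-scoped KVM_GET_CLOCK:

    struct kvm_clock_data clock;
    vm_ioctl(vm, KVM_GET_CLOCK, &clock);   /* asserts on failure */
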
1448 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
1453 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1454 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1455 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1457 for (region = vm->userspace_mem_region_head; region; in vm_dump()
1468 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1470 vm->pgd_created); in vm_dump()
1471 if (vm->pgd_created) { in vm_dump()
1474 virt_dump(stream, vm, indent + 4); in vm_dump()
1477 for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next) in vm_dump()
1478 vcpu_dump(stream, vm, vcpu->id, indent + 2); in vm_dump()
1557 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc() argument
1565 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_pages_alloc()
1568 paddr_min, vm->page_size); in vm_phy_pages_alloc()
1570 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
1571 base = pg = paddr_min >> vm->page_shift; in vm_phy_pages_alloc()
1585 paddr_min, vm->page_size, memslot); in vm_phy_pages_alloc()
1587 vm_dump(stderr, vm, 2); in vm_phy_pages_alloc()
1594 return base * vm->page_size; in vm_phy_pages_alloc()
1597 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, in vm_phy_page_alloc() argument
1600 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); in vm_phy_page_alloc()
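
vm_phy_pages_alloc() scans the chosen memslot for num contiguous free pages at or above paddr_min, dumping the whole vm to stderr when it cannot satisfy the request; vm_phy_page_alloc() is the single-page case. Example:

    /* Four contiguous guest-physical pages from memslot 0, at/above 1 MiB;
     * paddr_min must be page-aligned, per the assert above. */
    vm_paddr_t gpa = vm_phy_pages_alloc(vm, 4, 0x100000, 0);
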
1615 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
1617 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
1632 bool vm_is_unrestricted_guest(struct kvm_vm *vm) in vm_is_unrestricted_guest() argument
1638 if (vm == NULL) { in vm_is_unrestricted_guest()
1656 unsigned int vm_get_page_size(struct kvm_vm *vm) in vm_get_page_size() argument
1658 return vm->page_size; in vm_get_page_size()
1661 unsigned int vm_get_page_shift(struct kvm_vm *vm) in vm_get_page_shift() argument
1663 return vm->page_shift; in vm_get_page_shift()
1666 unsigned int vm_get_max_gfn(struct kvm_vm *vm) in vm_get_max_gfn() argument
1668 return vm->max_gfn; in vm_get_max_gfn()