Lines Matching refs:vm
78 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) in vm_enable_cap() argument
82 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); in vm_enable_cap()
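vm_enable_cap() is a thin wrapper that forwards a struct kvm_enable_cap to the KVM_ENABLE_CAP ioctl on the VM fd. A minimal usage sketch; the capability and argument values below are only illustrative and do not come from this file:

        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_X86_DISABLE_EXITS,       /* illustrative capability */
                .args[0] = KVM_X86_DISABLE_EXITS_HLT,   /* illustrative argument */
        };

        vm_enable_cap(vm, &cap);        /* returns the ioctl result */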
89 static void vm_open(struct kvm_vm *vm, int perm) in vm_open() argument
91 vm->kvm_fd = open(KVM_DEV_PATH, perm); in vm_open()
92 if (vm->kvm_fd < 0) in vm_open()
96 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL); in vm_open()
97 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " in vm_open()
98 "rc: %i errno: %i", vm->fd, errno); in vm_open()
121 struct kvm_vm *vm; in vm_create() local
125 vm = calloc(1, sizeof(*vm)); in vm_create()
126 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in vm_create()

128 vm->mode = mode; in vm_create()
129 vm_open(vm, perm); in vm_create()
132 switch (vm->mode) { in vm_create()
134 vm->page_size = 0x1000; in vm_create()
135 vm->page_shift = 12; in vm_create()
138 vm->vpages_valid = sparsebit_alloc(); in vm_create()
139 sparsebit_set_num(vm->vpages_valid, in vm_create()
140 0, (1ULL << (48 - 1)) >> vm->page_shift); in vm_create()
141 sparsebit_set_num(vm->vpages_valid, in vm_create()
142 (~((1ULL << (48 - 1)) - 1)) >> vm->page_shift, in vm_create()
143 (1ULL << (48 - 1)) >> vm->page_shift); in vm_create()
146 vm->max_gfn = ((1ULL << 52) >> vm->page_shift) - 1; in vm_create()
154 vm->vpages_mapped = sparsebit_alloc(); in vm_create()
156 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, in vm_create()
159 return vm; in vm_create()
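vm_create() opens /dev/kvm with the requested permission, issues KVM_CREATE_VM, sets up the valid/mapped virtual-page sparsebits for the chosen mode, and adds an initial anonymous memory region before returning the VM handle. A usage sketch, assuming the (mode, phy_pages, perm) signature and the VM_MODE_FLAT48PG enumerator used by this version of the library:

        /* Sketch: 4 KiB pages, 48-bit guest VAs, 512 pages of guest memory.
         * VM_MODE_FLAT48PG and the page-count argument are assumptions about
         * this library version; O_RDWR is the perm passed to open(KVM_DEV_PATH). */
        struct kvm_vm *vm = vm_create(VM_MODE_FLAT48PG, 512, O_RDWR);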
195 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log() argument
200 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
224 struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
228 for (region = vm->userspace_mem_region_head; region; in userspace_mem_region_find()
256 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, in kvm_userspace_memory_region_find() argument
261 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
283 struct vcpu *vcpu_find(struct kvm_vm *vm, in vcpu_find() argument
288 for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) { in vcpu_find()
308 static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid) in vm_vcpu_rm() argument
310 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vm_vcpu_rm()
325 vm->vcpu_head = vcpu->next; in vm_vcpu_rm()
409 struct kvm_vm *vm, vm_vaddr_t gva, size_t len) in kvm_memcmp_hva_gva() argument
422 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); in kvm_memcmp_hva_gva()
428 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
429 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
430 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
431 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
433 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
434 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
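kvm_memcmp_hva_gva() compares a host buffer against guest memory chunk by chunk, clamping each chunk so that neither pointer crosses a page boundary mid-compare. A sketch of how a test might call it, assuming the host-address parameter comes first as the name suggests; 'pattern', 'gva', and PATTERN_LEN are hypothetical test variables:

        /* Compare a host pattern buffer with what the guest wrote at 'gva'. */
        int r = kvm_memcmp_hva_gva(pattern, vm, gva, PATTERN_LEN);
        TEST_ASSERT(r == 0, "Guest memory does not match expected pattern");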
568 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
576 size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size; in vm_userspace_mem_region_add()
578 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
581 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
582 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
583 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
587 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
593 vm, guest_paddr, guest_paddr + npages * vm->page_size); in vm_userspace_mem_region_add()
600 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
605 for (region = vm->userspace_mem_region_head; region; in vm_userspace_mem_region_add()
611 && ((guest_paddr + npages * vm->page_size) in vm_userspace_mem_region_add()
628 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
648 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
654 region->host_mem, npages * vm->page_size, src_type); in vm_userspace_mem_region_add()
659 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
663 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
665 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
674 if (vm->userspace_mem_region_head) in vm_userspace_mem_region_add()
675 vm->userspace_mem_region_head->prev = region; in vm_userspace_mem_region_add()
676 region->next = vm->userspace_mem_region_head; in vm_userspace_mem_region_add()
677 vm->userspace_mem_region_head = region; in vm_userspace_mem_region_add()
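vm_userspace_mem_region_add() validates alignment and overlap against existing slots, mmaps (and for huge-page backing madvises) the host memory, registers it with KVM_SET_USER_MEMORY_REGION, and links the region onto the VM's list. A usage sketch, assuming the (vm, src_type, guest_paddr, slot, npages, flags) parameter order of this library version; the slot number and addresses are illustrative:

        /* Back 64 guest pages at GPA 0x100000 with anonymous memory in slot 1. */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    0x100000, 1 /* slot */, 64 /* npages */, 0);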
694 static struct userspace_mem_region *memslot2region(struct kvm_vm *vm, in memslot2region() argument
699 for (region = vm->userspace_mem_region_head; region; in memslot2region()
708 vm_dump(stderr, vm, 2); in memslot2region()
728 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
734 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
738 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
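vm_mem_region_set_flags() re-issues KVM_SET_USER_MEMORY_REGION with updated flags, which is how a test turns dirty logging on for a slot before harvesting it with kvm_vm_get_dirty_log(). A sketch; the slot number and 'npages' bitmap sizing are illustrative:

        /* Enable dirty logging on slot 1 (slot number is illustrative). */
        vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES);

        /* ... run the guest so it dirties some pages ... */

        /* One bit per page in the slot; 'npages' is the slot's page count. */
        unsigned long *bitmap = calloc((npages + 63) / 64, sizeof(*bitmap));
        kvm_vm_get_dirty_log(vm, 1, bitmap);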
788 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot) in vm_vcpu_add() argument
793 vcpu = vcpu_find(vm, vcpuid); in vm_vcpu_add()
805 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid); in vm_vcpu_add()
818 if (vm->vcpu_head) in vm_vcpu_add()
819 vm->vcpu_head->prev = vcpu; in vm_vcpu_add()
820 vcpu->next = vm->vcpu_head; in vm_vcpu_add()
821 vm->vcpu_head = vcpu; in vm_vcpu_add()
823 vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot); in vm_vcpu_add()
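vm_vcpu_add() creates the vCPU with KVM_CREATE_VCPU, links it onto vm->vcpu_head, and finishes with the architecture-specific vcpu_setup() for the given page-table and GDT memslots. A usage sketch; VCPU_ID is a hypothetical test-local constant and memslot 0 is assumed to hold the page tables and GDT:

        #define VCPU_ID 0       /* hypothetical test-local vCPU id */

        vm_vcpu_add(vm, VCPU_ID, 0 /* pgd_memslot */, 0 /* gdt_memslot */);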
845 static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
848 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
851 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
852 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
856 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
858 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
867 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
870 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
879 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
882 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
896 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
902 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
909 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
932 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, in vm_vaddr_alloc() argument
935 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in vm_vaddr_alloc()
937 virt_pgd_alloc(vm, pgd_memslot); in vm_vaddr_alloc()
942 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in vm_vaddr_alloc()
946 pages--, vaddr += vm->page_size) { in vm_vaddr_alloc()
949 paddr = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PADDR, data_memslot); in vm_vaddr_alloc()
951 virt_pg_map(vm, vaddr, paddr, pgd_memslot); in vm_vaddr_alloc()
953 sparsebit_set(vm->vpages_mapped, in vm_vaddr_alloc()
954 vaddr >> vm->page_shift); in vm_vaddr_alloc()
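vm_vaddr_alloc() rounds the request up to whole pages, finds an unused virtual range with vm_vaddr_unused_gap(), then backs and maps each page with vm_phy_page_alloc() and virt_pg_map(), recording it in vpages_mapped. A sketch that allocates guest-virtual memory and initializes it from the host through addr_gva2hva(); the trailing data/pgd memslot parameters are assumptions about this library version:

        /* Allocate two pages of guest-virtual memory at or above 0x10000,
         * backed from memslot 0, then zero it through the host mapping. */
        vm_vaddr_t gva = vm_vaddr_alloc(vm, 2 * 4096, 0x10000, 0, 0);
        memset(addr_gva2hva(vm, gva), 0, 2 * 4096);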
977 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
980 size_t page_size = vm->page_size; in virt_map()
987 virt_pg_map(vm, vaddr, paddr, pgd_memslot); in virt_map()
1009 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1012 for (region = vm->userspace_mem_region_head; region; in addr_gpa2hva()
1041 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1044 for (region = vm->userspace_mem_region_head; region; in addr_hva2gpa()
1070 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1074 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0); in vm_create_irqchip()
1078 vm->has_irqchip = true; in vm_create_irqchip()
1095 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_state() argument
1097 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_state()
1116 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in vcpu_run() argument
1118 int ret = _vcpu_run(vm, vcpuid); in vcpu_run()
1123 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) in _vcpu_run() argument
1125 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_run()
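vcpu_run() is the asserting wrapper around _vcpu_run(), which issues KVM_RUN on the vCPU fd; the mmapped kvm_run structure returned by vcpu_state() then tells the test why the guest exited. A sketch using the hypothetical VCPU_ID from the earlier vm_vcpu_add() example:

        vcpu_run(vm, VCPU_ID);

        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                    "Unexpected exit reason: %u", run->exit_reason);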
1149 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_set_mp_state() argument
1152 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_set_mp_state()
1176 void vcpu_regs_get(struct kvm_vm *vm, in vcpu_regs_get() argument
1179 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_get()
1204 void vcpu_regs_set(struct kvm_vm *vm, in vcpu_regs_set() argument
1207 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_regs_set()
1218 void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_get() argument
1221 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_get()
1232 void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, in vcpu_events_set() argument
1235 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_events_set()
1259 uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) in vcpu_get_msr() argument
1261 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_get_msr()
1292 void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, in vcpu_set_msr() argument
1295 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_set_msr()
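vcpu_get_msr() and vcpu_set_msr() wrap the KVM_GET_MSRS/KVM_SET_MSRS ioctls for a single MSR index. A sketch that reads an MSR, modifies it, and writes it back; the MSR index and bit are only illustrative:

        /* MSR_IA32_MISC_ENABLE is an illustrative index. */
        uint64_t val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_MISC_ENABLE);
        vcpu_set_msr(vm, VCPU_ID, MSR_IA32_MISC_ENABLE, val | 1);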
1328 void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) in vcpu_args_set() argument
1338 vcpu_regs_get(vm, vcpuid, &regs); in vcpu_args_set()
1358 vcpu_regs_set(vm, vcpuid, &regs); in vcpu_args_set()
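vcpu_args_set() loads the variadic arguments (up to six) into the guest's function-argument registers by reading the current register file, patching it, and writing it back. A sketch passing two illustrative arguments to the guest's entry function:

        /* Hand the guest's entry function two arguments. */
        vcpu_args_set(vm, VCPU_ID, 2, gva /* arg0 */, 64 /* arg1 */);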
1376 void vcpu_sregs_get(struct kvm_vm *vm, in vcpu_sregs_get() argument
1379 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_sregs_get()
1405 void vcpu_sregs_set(struct kvm_vm *vm, in vcpu_sregs_set() argument
1408 int ret = _vcpu_sregs_set(vm, vcpuid, sregs); in vcpu_sregs_set()
1413 int _vcpu_sregs_set(struct kvm_vm *vm, in _vcpu_sregs_set() argument
1416 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in _vcpu_sregs_set()
1437 void vcpu_ioctl(struct kvm_vm *vm, in vcpu_ioctl() argument
1440 struct vcpu *vcpu = vcpu_find(vm, vcpuid); in vcpu_ioctl()
1461 void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) in vm_ioctl() argument
1465 ret = ioctl(vm->fd, cmd, arg); in vm_ioctl()
1484 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
1489 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1490 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1491 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1493 for (region = vm->userspace_mem_region_head; region; in vm_dump()
1504 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1506 vm->pgd_created); in vm_dump()
1507 if (vm->pgd_created) { in vm_dump()
1510 virt_dump(stream, vm, indent + 4); in vm_dump()
1513 for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next) in vm_dump()
1514 vcpu_dump(stream, vm, vcpu->id, indent + 2); in vm_dump()
1532 void vcpu_dump(FILE *stream, struct kvm_vm *vm, in vcpu_dump() argument
1541 vcpu_regs_get(vm, vcpuid, &regs); in vcpu_dump()
1545 vcpu_sregs_get(vm, vcpuid, &sregs); in vcpu_dump()
1622 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, in vm_phy_page_alloc() argument
1628 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_page_alloc()
1631 paddr_min, vm->page_size); in vm_phy_page_alloc()
1634 region = memslot2region(vm, memslot); in vm_phy_page_alloc()
1637 pg = paddr_min >> vm->page_shift; in vm_phy_page_alloc()
1644 paddr_min, vm->page_size, memslot); in vm_phy_page_alloc()
1646 vm_dump(stderr, vm, 2); in vm_phy_page_alloc()
1654 return pg * vm->page_size; in vm_phy_page_alloc()
1668 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
1670 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
1673 void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id, in guest_args_read() argument
1676 struct kvm_run *run = vcpu_state(vm, vcpu_id); in guest_args_read()
1680 vcpu_regs_get(vm, vcpu_id, &regs); in guest_args_read()
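guest_args_read() combines vcpu_state() and vcpu_regs_get() to recover the arguments of the guest's last port I/O exit. A sketch of the host side of that handshake; GUEST_PORT_SYNC and the port/arg0/arg1 fields of struct guest_args are assumptions about this era of the library, not shown in the listing:

        struct guest_args args;

        vcpu_run(vm, VCPU_ID);
        guest_args_read(vm, VCPU_ID, &args);

        if (args.port == GUEST_PORT_SYNC)       /* assumed sync-port constant */
                fprintf(stderr, "guest sync: arg0=0x%lx arg1=0x%lx\n",
                        (unsigned long)args.arg0, (unsigned long)args.arg1);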