Lines matching references to vm in the KVM selftests library (kvm_util.c); the leading number on each line is the source line number.
122 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) in vm_enable_dirty_ring() argument
124 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) in vm_enable_dirty_ring()
125 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); in vm_enable_dirty_ring()
127 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); in vm_enable_dirty_ring()
128 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
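The dirty-ring helper above picks the acquire/release variant of the capability when it is available, falls back to the plain one otherwise, and records the ring size on the VM. A minimal usage sketch, assuming the standard selftest header and that the size is passed in bytes (entries * sizeof(struct kvm_dirty_gfn)):

#include "kvm_util.h"

static void enable_dirty_ring(struct kvm_vm *vm)
{
        /* Illustrative entry count; the ring has to be enabled before any
         * vCPU is created so it can later be mmap()ed per vCPU. */
        uint32_t ring_size = 4096 * sizeof(struct kvm_dirty_gfn);

        vm_enable_dirty_ring(vm, ring_size);
}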
131 static void vm_open(struct kvm_vm *vm) in vm_open() argument
133 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
137 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); in vm_open()
138 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); in vm_open()
190 struct kvm_vm *vm; in ____vm_create() local
195 vm = calloc(1, sizeof(*vm)); in ____vm_create()
196 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in ____vm_create()
198 INIT_LIST_HEAD(&vm->vcpus); in ____vm_create()
199 vm->regions.gpa_tree = RB_ROOT; in ____vm_create()
200 vm->regions.hva_tree = RB_ROOT; in ____vm_create()
201 hash_init(vm->regions.slot_hash); in ____vm_create()
203 vm->mode = mode; in ____vm_create()
204 vm->type = 0; in ____vm_create()
206 vm->pa_bits = vm_guest_mode_params[mode].pa_bits; in ____vm_create()
207 vm->va_bits = vm_guest_mode_params[mode].va_bits; in ____vm_create()
208 vm->page_size = vm_guest_mode_params[mode].page_size; in ____vm_create()
209 vm->page_shift = vm_guest_mode_params[mode].page_shift; in ____vm_create()
212 switch (vm->mode) { in ____vm_create()
214 vm->pgtable_levels = 4; in ____vm_create()
217 vm->pgtable_levels = 3; in ____vm_create()
220 vm->pgtable_levels = 4; in ____vm_create()
223 vm->pgtable_levels = 3; in ____vm_create()
227 vm->pgtable_levels = 4; in ____vm_create()
231 vm->pgtable_levels = 3; in ____vm_create()
236 vm->pgtable_levels = 4; in ____vm_create()
239 vm->pgtable_levels = 3; in ____vm_create()
243 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in ____vm_create()
249 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in ____vm_create()
251 vm->va_bits); in ____vm_create()
253 vm->pa_bits); in ____vm_create()
254 vm->pgtable_levels = 4; in ____vm_create()
255 vm->va_bits = 48; in ____vm_create()
261 vm->pgtable_levels = 5; in ____vm_create()
264 vm->pgtable_levels = 5; in ____vm_create()
271 if (vm->pa_bits != 40) in ____vm_create()
272 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in ____vm_create()
275 vm_open(vm); in ____vm_create()
278 vm->vpages_valid = sparsebit_alloc(); in ____vm_create()
279 sparsebit_set_num(vm->vpages_valid, in ____vm_create()
280 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in ____vm_create()
281 sparsebit_set_num(vm->vpages_valid, in ____vm_create()
282 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in ____vm_create()
283 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in ____vm_create()
286 vm->max_gfn = vm_compute_max_gfn(vm); in ____vm_create()
289 vm->vpages_mapped = sparsebit_alloc(); in ____vm_create()
291 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, in ____vm_create()
294 return vm; in ____vm_create()
337 struct kvm_vm *vm; in __vm_create() local
339 vm = ____vm_create(mode, nr_pages); in __vm_create()
341 kvm_vm_elf_load(vm, program_invocation_name); in __vm_create()
344 vm_create_irqchip(vm); in __vm_create()
346 return vm; in __vm_create()
372 struct kvm_vm *vm; in __vm_create_with_vcpus() local
377 vm = __vm_create(mode, nr_vcpus, extra_mem_pages); in __vm_create_with_vcpus()
380 vcpus[i] = vm_vcpu_add(vm, i, guest_code); in __vm_create_with_vcpus()
382 return vm; in __vm_create_with_vcpus()
390 struct kvm_vm *vm; in __vm_create_with_one_vcpu() local
392 vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages, in __vm_create_with_one_vcpu()
396 return vm; in __vm_create_with_one_vcpu()
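____vm_create() sets up the mode parameters, opens /dev/kvm, creates the VM and a default memslot; __vm_create() then loads the test binary and creates the irqchip; the *_with_vcpus() wrappers add runnable vCPUs on top. A minimal sketch using the conventional vm_create_with_one_vcpu() wrapper, assumed here to be the thin inline around __vm_create_with_one_vcpu() with no extra memory pages:

#include "kvm_util.h"

static void guest_code(void)
{
        GUEST_DONE();   /* ucall plumbing assumed from the selftest headers */
}

int main(void)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;

        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
        vcpu_run(vcpu);
        kvm_vm_free(vm);
        return 0;
}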
433 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, in vm_arch_vcpu_recreate() argument
436 return __vm_vcpu_add(vm, vcpu_id); in vm_arch_vcpu_recreate()
439 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) in vm_recreate_with_one_vcpu() argument
441 kvm_vm_restart(vm); in vm_recreate_with_one_vcpu()
443 return vm_vcpu_recreate(vm, 0); in vm_recreate_with_one_vcpu()
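vm_recreate_with_one_vcpu() exists for save/restore style tests: drop the VM and vCPU file descriptors, reopen them against the same guest memory, and restore the saved register state. A hedged sketch of that pattern, assuming the x86-specific vcpu_save_state()/vcpu_load_state() helpers from processor.h:

#include "kvm_util.h"
#include "processor.h"

static void save_and_restore(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
        struct kvm_x86_state *state = vcpu_save_state(vcpu);

        kvm_vm_release(vm);             /* close fds, keep guest memory */
        vcpu = vm_recreate_with_one_vcpu(vm);
        vcpu_load_state(vcpu, state);
        kvm_x86_state_cleanup(state);
}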
466 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
470 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
505 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, in kvm_userspace_memory_region_find() argument
510 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
534 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) in vm_vcpu_rm() argument
539 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
571 static void __vm_mem_region_delete(struct kvm_vm *vm, in __vm_mem_region_delete() argument
578 rb_erase(®ion->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
579 rb_erase(®ion->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
584 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); in __vm_mem_region_delete()
668 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) in kvm_memcmp_hva_gva() argument
683 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); in kvm_memcmp_hva_gva()
690 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
691 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
692 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) in kvm_memcmp_hva_gva()
693 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
695 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
696 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); in kvm_memcmp_hva_gva()
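kvm_memcmp_hva_gva() walks the guest range page by page (hence the page-crossing arithmetic above) and returns memcmp()-style results, so a test can compare a host reference buffer against guest memory in one call. A small sketch:

#include "kvm_util.h"

static void check_guest_copy(struct kvm_vm *vm, void *host_ref,
                             vm_vaddr_t guest_buf, size_t len)
{
        TEST_ASSERT(!kvm_memcmp_hva_gva(host_ref, vm, guest_buf, len),
                    "Guest buffer does not match the host reference copy");
}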
767 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in __vm_set_user_memory_region() argument
778 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
781 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in vm_set_user_memory_region() argument
784 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); in vm_set_user_memory_region()
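The __ variant returns the raw ioctl() result while vm_set_user_memory_region() asserts on failure, which is handy when a test wants to check the return value itself. A sketch, assuming the trailing parameters are the GPA, size and host address (the prototype is truncated in the listing above):

#include <errno.h>
#include "kvm_util.h"

static void set_slot_expect_success(struct kvm_vm *vm, uint32_t slot,
                                    uint64_t gpa, uint64_t size, void *hva)
{
        int r = __vm_set_user_memory_region(vm, slot, 0, gpa, size, hva);

        TEST_ASSERT(!r, "KVM_SET_USER_MEMORY_REGION failed, errno = %d", errno);
}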
812 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
822 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_userspace_mem_region_add()
824 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_userspace_mem_region_add()
826 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
829 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
830 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_userspace_mem_region_add()
831 <= vm->max_gfn, "Physical range beyond maximum " in vm_userspace_mem_region_add()
835 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
842 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
849 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
854 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
872 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
919 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
922 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
928 guest_paddr >> vm->page_shift, npages); in vm_userspace_mem_region_add()
932 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
934 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
943 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
944 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
945 hash_add(vm->regions.slot_hash, ®ion->slot_node, slot); in vm_userspace_mem_region_add()
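vm_userspace_mem_region_add() is the full-featured path: it validates alignment against the guest page size, mmap()s (and optionally madvise()s) host backing of the requested source type, registers the slot with KVM, and indexes the region in the GPA/HVA trees and the slot hash. A usage sketch with illustrative GPA, slot and size values:

#include "kvm_util.h"

#define TEST_MEM_GPA    0x10000000ul    /* illustrative, page-aligned */
#define TEST_MEM_SLOT   10              /* illustrative slot number   */
#define TEST_MEM_PAGES  64

static void add_test_slot(struct kvm_vm *vm)
{
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, TEST_MEM_GPA,
                                    TEST_MEM_SLOT, TEST_MEM_PAGES, 0);
}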
977 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
981 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
989 vm_dump(stderr, vm, 2); in memslot2region()
1008 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
1013 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1017 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
1038 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) in vm_mem_region_move() argument
1043 region = memslot2region(vm, slot); in vm_mem_region_move()
1047 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
1067 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) in vm_mem_region_delete() argument
1069 __vm_mem_region_delete(vm, memslot2region(vm, slot), true); in vm_mem_region_delete()
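vm_mem_region_set_flags(), vm_mem_region_move() and vm_mem_region_delete() all resolve the slot through memslot2region() and re-issue KVM_SET_USER_MEMORY_REGION. A sketch of a slot's lifecycle after it has been added (slot and GPA values are illustrative):

#include "kvm_util.h"

static void exercise_slot(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
        vm_mem_region_set_flags(vm, slot, KVM_MEM_LOG_DIRTY_PAGES);
        vm_mem_region_set_flags(vm, slot, 0);
        vm_mem_region_move(vm, slot, new_gpa);
        vm_mem_region_delete(vm, slot);
}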
1088 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) in vcpu_exists() argument
1092 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_exists()
1104 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) in __vm_vcpu_add() argument
1109 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id); in __vm_vcpu_add()
1115 vcpu->vm = vm; in __vm_vcpu_add()
1117 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); in __vm_vcpu_add()
1129 list_add(&vcpu->list, &vm->vcpus); in __vm_vcpu_add()
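__vm_vcpu_add() only creates and mmap()s the vCPU and links it into the VM's list; the arch-specific vm_vcpu_add() wrapper (used at line 380 above) also sets up registers and the guest entry point. Adding a second vCPU to an existing VM might look like:

#include "kvm_util.h"

static struct kvm_vcpu *add_second_vcpu(struct kvm_vm *vm, void *guest_code)
{
        /* vCPU id 1 is illustrative; ids only need to be unique per VM. */
        return vm_vcpu_add(vm, 1, guest_code);
}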
1154 static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
1157 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1160 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1161 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1165 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1167 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1176 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1179 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1188 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1191 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1204 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1210 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1217 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1239 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) in vm_vaddr_alloc() argument
1241 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in vm_vaddr_alloc()
1243 virt_pgd_alloc(vm); in vm_vaddr_alloc()
1244 vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages, in vm_vaddr_alloc()
1245 KVM_UTIL_MIN_PFN * vm->page_size, 0); in vm_vaddr_alloc()
1251 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in vm_vaddr_alloc()
1255 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in vm_vaddr_alloc()
1257 virt_pg_map(vm, vaddr, paddr); in vm_vaddr_alloc()
1259 sparsebit_set(vm->vpages_mapped, in vm_vaddr_alloc()
1260 vaddr >> vm->page_shift); in vm_vaddr_alloc()
1280 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) in vm_vaddr_alloc_pages() argument
1282 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); in vm_vaddr_alloc_pages()
1299 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) in vm_vaddr_alloc_page() argument
1301 return vm_vaddr_alloc_pages(vm, 1); in vm_vaddr_alloc_page()
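vm_vaddr_alloc() pairs a physical allocation with a free gap in guest virtual space (found via vm_vaddr_unused_gap()) and maps the two page by page; the *_pages()/*_page() wrappers just fix the size. A sketch, assuming the usual KVM_UTIL_MIN_VADDR floor:

#include <string.h>
#include <unistd.h>
#include "kvm_util.h"

static vm_vaddr_t alloc_guest_buffer(struct kvm_vm *vm)
{
        vm_vaddr_t buf = vm_vaddr_alloc(vm, 3 * getpagesize(), KVM_UTIL_MIN_VADDR);

        /* The host can reach the same pages through addr_gva2hva(). */
        memset(addr_gva2hva(vm, buf), 0, 3 * getpagesize());

        return buf;
}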
1320 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
1323 size_t page_size = vm->page_size; in virt_map()
1330 virt_pg_map(vm, vaddr, paddr); in virt_map()
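virt_map() installs page-table mappings for memory that has already been placed physically, which is useful when a test wants a fixed guest virtual address. A sketch (the final npages parameter is an assumption, since the prototype is truncated above, and the addresses are illustrative):

#include "kvm_util.h"

#define TEST_GVA        0xc0000000ul    /* illustrative guest virtual address */

static void map_fixed_gva(struct kvm_vm *vm)
{
        /* Four pages from memslot 0, at or above an illustrative 1 MiB floor. */
        vm_paddr_t paddr = vm_phy_pages_alloc(vm, 4, 0x100000, 0);

        virt_map(vm, TEST_GVA, paddr, 4);
}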
1353 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1357 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1384 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1388 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
1427 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2alias() argument
1432 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
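addr_gpa2hva(), addr_hva2gpa(), addr_gpa2alias() and addr_gva2hva() (line 1865 below) convert between the different views of guest memory by consulting the region trees, or by walking the guest page tables in the GVA case. For example:

#include "kvm_util.h"

static void show_translations(struct kvm_vm *vm, vm_paddr_t gpa, vm_vaddr_t gva)
{
        void *hva = addr_gpa2hva(vm, gpa);       /* GPA -> host pointer        */
        vm_paddr_t back = addr_hva2gpa(vm, hva); /* ... and back again         */
        void *alias = addr_gpa2alias(vm, gpa);   /* alias mapping of the GPA   */
        void *gva_hva = addr_gva2hva(vm, gva);   /* GVA -> host, via guest PTs */

        TEST_ASSERT(back == gpa, "Round trip through the HVA should be lossless");
        (void)alias;
        (void)gva_hva;
}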
1444 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1446 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); in vm_create_irqchip()
1448 vm->has_irqchip = true; in vm_create_irqchip()
1509 uint32_t page_size = vcpu->vm->page_size; in vcpu_map_dirty_ring()
1510 uint32_t size = vcpu->vm->dirty_ring_size; in vcpu_map_dirty_ring()
1551 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_test_create_device() argument
1558 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_test_create_device()
1561 int __kvm_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_create_device() argument
1570 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_create_device()
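__kvm_test_create_device() probes KVM_CREATE_DEVICE with the test flag and __kvm_create_device() performs the real creation, returning the device fd (or a negative error). A sketch using KVM_DEV_TYPE_VFIO purely as an example device type:

#include "kvm_util.h"

static int create_vfio_device_if_supported(struct kvm_vm *vm)
{
        if (__kvm_test_create_device(vm, KVM_DEV_TYPE_VFIO))
                return -1;      /* device type not supported by this kernel */

        return __kvm_create_device(vm, KVM_DEV_TYPE_VFIO);
}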
1603 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in _kvm_irq_line() argument
1610 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); in _kvm_irq_line()
1613 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in kvm_irq_line() argument
1615 int ret = _kvm_irq_line(vm, irq, level); in kvm_irq_line()
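kvm_irq_line() asserts or deasserts a GSI on the in-kernel irqchip, so vm_create_irqchip() must already have been called (as the __vm_create() path does at line 344 above); _kvm_irq_line() returns the raw result instead of asserting. A sketch that pulses line 0 (the GSI number is illustrative and arch-dependent):

#include "kvm_util.h"

static void pulse_irq0(struct kvm_vm *vm)
{
        kvm_irq_line(vm, 0, 1);         /* assert   */
        kvm_irq_line(vm, 0, 0);         /* deassert */
}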
1651 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in _kvm_gsi_routing_write() argument
1656 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing); in _kvm_gsi_routing_write()
1662 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in kvm_gsi_routing_write() argument
1666 ret = _kvm_gsi_routing_write(vm, routing); in kvm_gsi_routing_write()
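kvm_gsi_routing_write() hands a caller-built struct kvm_irq_routing table to KVM_SET_GSI_ROUTING. A hedged sketch that routes GSI 0 to pin 0 of irqchip 0; the entry layout follows the uapi structures, and whether the helper takes ownership of (and frees) the table is left to the library, so the sketch does not free it:

#include <stdlib.h>
#include "kvm_util.h"

static void route_gsi0(struct kvm_vm *vm)
{
        struct kvm_irq_routing *routing;

        routing = calloc(1, sizeof(*routing) + sizeof(routing->entries[0]));
        TEST_ASSERT(routing, "Failed to allocate a routing table");

        routing->nr = 1;
        routing->entries[0].gsi = 0;
        routing->entries[0].type = KVM_IRQ_ROUTING_IRQCHIP;
        routing->entries[0].u.irqchip.irqchip = 0;
        routing->entries[0].u.irqchip.pin = 0;

        kvm_gsi_routing_write(vm, routing);
}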
1685 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
1691 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1692 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1693 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1695 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1705 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1707 vm->pgd_created); in vm_dump()
1708 if (vm->pgd_created) { in vm_dump()
1711 virt_dump(stream, vm, indent + 4); in vm_dump()
1715 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
1799 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc() argument
1807 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in vm_phy_pages_alloc()
1810 paddr_min, vm->page_size); in vm_phy_pages_alloc()
1812 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
1813 base = pg = paddr_min >> vm->page_shift; in vm_phy_pages_alloc()
1827 paddr_min, vm->page_size, memslot); in vm_phy_pages_alloc()
1829 vm_dump(stderr, vm, 2); in vm_phy_pages_alloc()
1836 return base * vm->page_size; in vm_phy_pages_alloc()
1839 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, in vm_phy_page_alloc() argument
1842 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); in vm_phy_page_alloc()
1848 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) in vm_alloc_page_table() argument
1850 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0); in vm_alloc_page_table()
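vm_phy_pages_alloc() hands out contiguous guest physical pages from a memslot, with vm_phy_page_alloc() and vm_alloc_page_table() as the single-page conveniences. For example (the minimum address is illustrative and must be page aligned):

#include "kvm_util.h"

static void alloc_phys_pages(struct kvm_vm *vm)
{
        /* Four contiguous pages from memslot 0, at or above 1 MiB. */
        vm_paddr_t data = vm_phy_pages_alloc(vm, 4, 0x100000, 0);

        /* One page placed suitably for use as a guest page table. */
        vm_paddr_t pt = vm_alloc_page_table(vm);

        (void)data;
        (void)pt;
}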
1865 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
1867 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
1870 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) in vm_compute_max_gfn() argument
1872 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_compute_max_gfn()
1996 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, in __vm_get_stat() argument
2003 if (!vm->stats_fd) { in __vm_get_stat()
2004 vm->stats_fd = vm_get_stats_fd(vm); in __vm_get_stat()
2005 read_stats_header(vm->stats_fd, &vm->stats_header); in __vm_get_stat()
2006 vm->stats_desc = read_stats_descriptors(vm->stats_fd, in __vm_get_stat()
2007 &vm->stats_header); in __vm_get_stat()
2010 size_desc = get_stats_descriptor_size(&vm->stats_header); in __vm_get_stat()
2012 for (i = 0; i < vm->stats_header.num_desc; ++i) { in __vm_get_stat()
2013 desc = (void *)vm->stats_desc + (i * size_desc); in __vm_get_stat()
2018 read_stat_data(vm->stats_fd, &vm->stats_header, desc, in __vm_get_stat()
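__vm_get_stat() lazily opens the VM's binary-stats fd, caches the header and descriptors, then reads the named stat. A sketch; the stat name and the meaning of the final argument (number of u64 elements to read) are assumptions based on typical usage:

#include "kvm_util.h"

static uint64_t read_vm_stat(struct kvm_vm *vm, const char *name)
{
        uint64_t val = 0;

        __vm_get_stat(vm, name, &val, 1);

        return val;
}

/* e.g. read_vm_stat(vm, "remote_tlb_flush") on x86. */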