Lines Matching full:region
217 * When phy_pages is non-zero, a memory region of phy_pages physical pages
356 /* The maximum page table size for a memory region will be when the in vm_create_with_vcpus()
419 struct userspace_mem_region *region; in kvm_vm_restart() local
425 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
426 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in kvm_vm_restart()
431 ret, errno, region->region.slot, in kvm_vm_restart()
432 region->region.flags, in kvm_vm_restart()
433 region->region.guest_phys_addr, in kvm_vm_restart()
434 region->region.memory_size); in kvm_vm_restart()
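
The kvm_vm_restart() matches above replay each saved region into a new VM via the raw KVM_SET_USER_MEMORY_REGION ioctl. A minimal standalone sketch of that ioctl, where vm_fd, slot, gpa, size and host_mem stand in for the caller's values:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Register (or update) one memslot on an existing VM fd. */
    static int set_memslot(int vm_fd, uint32_t slot, uint64_t gpa,
                           uint64_t size, void *host_mem)
    {
            struct kvm_userspace_memory_region region = {
                    .slot            = slot,
                    .flags           = 0,
                    .guest_phys_addr = gpa,
                    .memory_size     = size,               /* page-aligned */
                    .userspace_addr  = (uint64_t)host_mem, /* host backing */
            };

            return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }
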
467 * Userspace Memory Region Find
477 * Pointer to overlapping region, NULL if no such region.
479 * Searches for a region with any physical memory that overlaps with
483 * region exists.
491 struct userspace_mem_region *region = in userspace_mem_region_find() local
493 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
494 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
495 + region->region.memory_size - 1; in userspace_mem_region_find()
497 return region; in userspace_mem_region_find()
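
The userspace_mem_region_find() fragments compute an inclusive [existing_start, existing_end] for each region and test it against the queried [start, end]. That test reduces to the usual closed-interval overlap predicate, sketched here:

    #include <stdbool.h>
    #include <stdint.h>

    /* Inclusive ranges overlap iff each begins at or before the other ends. */
    static bool ranges_overlap(uint64_t start, uint64_t end,
                               uint64_t existing_start, uint64_t existing_end)
    {
            return start <= existing_end && existing_start <= end;
    }
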
509 * KVM Userspace Memory Region Find
519 * Pointer to overlapping region, NULL if no such region.
528 struct userspace_mem_region *region; in kvm_userspace_memory_region_find() local
530 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
531 if (!region) in kvm_userspace_memory_region_find()
534 return &region->region; in kvm_userspace_memory_region_find()
617 struct userspace_mem_region *region, in __vm_mem_region_delete() argument
623 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
624 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
625 hash_del(&region->slot_node); in __vm_mem_region_delete()
628 region->region.memory_size = 0; in __vm_mem_region_delete()
629 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
633 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
634 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
637 free(region); in __vm_mem_region_delete()
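
Note the deletion idiom in __vm_mem_region_delete() above: memory_size is zeroed and KVM_SET_USER_MEMORY_REGION is re-issued, which is how KVM is asked to drop a slot. A standalone sketch of just that step (delete_memslot is a hypothetical helper, not part of the library):

    #include <assert.h>
    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    static void delete_memslot(int vm_fd, uint32_t slot)
    {
            struct kvm_userspace_memory_region region = {
                    .slot        = slot,
                    .memory_size = 0, /* size 0 tells KVM to delete the slot */
            };

            assert(ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) == 0);
    }
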
647 struct userspace_mem_region *region; in kvm_vm_free() local
653 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
654 __vm_mem_region_delete(vmp, region, false); in kvm_vm_free()
737 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
746 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
747 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
750 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
751 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
752 "Duplicate GPA in region tree"); in vm_userspace_mem_region_gpa_insert()
758 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
759 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
763 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
772 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
775 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
777 "Duplicate HVA in region tree"); in vm_userspace_mem_region_hva_insert()
783 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
784 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
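
Both insert helpers follow the standard kernel rbtree idiom: walk the child links to find the insertion point, then rb_link_node() plus rb_insert_color() to rebalance. A sketch of the GPA-keyed variant, assuming the selftests' userspace rbtree headers and the struct layout implied by the matches:

    #include "linux/rbtree.h"

    static void gpa_tree_insert(struct rb_root *gpa_tree,
                                struct userspace_mem_region *region)
    {
            struct rb_node **cur = &gpa_tree->rb_node, *parent = NULL;

            while (*cur) {
                    struct userspace_mem_region *c = container_of(*cur,
                            struct userspace_mem_region, gpa_node);

                    parent = *cur;
                    if (region->region.guest_phys_addr <
                        c->region.guest_phys_addr)
                            cur = &(*cur)->rb_left;
                    else
                            cur = &(*cur)->rb_right;
            }

            rb_link_node(&region->gpa_node, parent, cur);
            rb_insert_color(&region->gpa_node, gpa_tree);
    }
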
788 * VM Userspace Memory Region Add
792 * src_type - Storage source for this region.
795 * slot - KVM region slot
797 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
805 * given by guest_paddr. The region is created with a KVM region slot
807 * region is created with the flags given by flags.
815 struct userspace_mem_region *region; in vm_userspace_mem_region_add() local
835 * Confirm a mem region with an overlapping address doesn't in vm_userspace_mem_region_add()
838 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_userspace_mem_region_add()
840 if (region != NULL) in vm_userspace_mem_region_add()
847 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
848 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
850 /* Confirm no region with the requested slot already exists. */ in vm_userspace_mem_region_add()
851 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
853 if (region->region.slot != slot) in vm_userspace_mem_region_add()
856 TEST_FAIL("A mem region with the requested slot " in vm_userspace_mem_region_add()
861 region->region.slot, in vm_userspace_mem_region_add()
862 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
863 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
866 /* Allocate and initialize new mem region structure. */ in vm_userspace_mem_region_add()
867 region = calloc(1, sizeof(*region)); in vm_userspace_mem_region_add()
868 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_userspace_mem_region_add()
869 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
883 region->mmap_size += alignment; in vm_userspace_mem_region_add()
885 region->fd = -1; in vm_userspace_mem_region_add()
892 region->fd = memfd_create("kvm_selftest", memfd_flags); in vm_userspace_mem_region_add()
893 TEST_ASSERT(region->fd != -1, in vm_userspace_mem_region_add()
896 ret = ftruncate(region->fd, region->mmap_size); in vm_userspace_mem_region_add()
899 ret = fallocate(region->fd, in vm_userspace_mem_region_add()
901 region->mmap_size); in vm_userspace_mem_region_add()
905 region->mmap_start = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
908 region->fd, 0); in vm_userspace_mem_region_add()
909 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_userspace_mem_region_add()
911 region->mmap_start, errno); in vm_userspace_mem_region_add()
914 region->host_mem = align(region->mmap_start, alignment); in vm_userspace_mem_region_add()
919 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
922 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
926 region->unused_phy_pages = sparsebit_alloc(); in vm_userspace_mem_region_add()
927 sparsebit_set_num(region->unused_phy_pages, in vm_userspace_mem_region_add()
929 region->region.slot = slot; in vm_userspace_mem_region_add()
930 region->region.flags = flags; in vm_userspace_mem_region_add()
931 region->region.guest_phys_addr = guest_paddr; in vm_userspace_mem_region_add()
932 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
933 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_userspace_mem_region_add()
934 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
940 guest_paddr, (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
943 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
944 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
945 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
948 if (region->fd >= 0) { in vm_userspace_mem_region_add()
949 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
952 region->fd, 0); in vm_userspace_mem_region_add()
953 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_userspace_mem_region_add()
957 region->host_alias = align(region->mmap_alias, alignment); in vm_userspace_mem_region_add()
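
Putting the add path together, a caller typically looks like this (a sketch assuming the usual selftests prototype of vm, backing source, guest paddr, slot, page count, flags; add_test_slot is hypothetical):

    #include "kvm_util.h"

    /* Back a 64-page slot at GPA 0x10000000 with anonymous memory and
     * enable dirty logging on it from the start. */
    static void add_test_slot(struct kvm_vm *vm)
    {
            vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                        0x10000000, 1 /* slot */,
                                        64 /* npages */,
                                        KVM_MEM_LOG_DIRTY_PAGES);
    }
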
962 * Memslot to region
971 * Pointer to memory region structure that describes the memory region in memslot2region()
973 * on error (e.g. currently no memory region using memslot as a KVM
979 struct userspace_mem_region *region; in memslot2region() local
981 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
983 if (region->region.slot == memslot) in memslot2region()
984 return region; in memslot2region()
986 fprintf(stderr, "No mem region with the requested slot found,\n" in memslot2region()
990 TEST_FAIL("Mem region not found"); in memslot2region()
995 * VM Memory Region Flags Set
1005 * Sets the flags of the memory region specified by the value of slot,
1011 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1013 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1015 region->region.flags = flags; in vm_mem_region_set_flags()
1017 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
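
This is the entry point dirty-logging tests use to toggle a slot; a hedged usage sketch, assuming slot 1 was added earlier (toggle_dirty_logging is hypothetical):

    #include "kvm_util.h"

    static void toggle_dirty_logging(struct kvm_vm *vm)
    {
            vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES);
            /* ... run vCPUs and harvest the dirty bitmap here ... */
            vm_mem_region_set_flags(vm, 1, 0);
    }
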
1025 * VM Memory Region Move
1029 * slot - Slot of the memory region to move
1036 * Change the gpa of a memory region.
1040 struct userspace_mem_region *region; in vm_mem_region_move() local
1043 region = memslot2region(vm, slot); in vm_mem_region_move()
1045 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1047 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
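
A usage sketch for the move path; per the fragments above only guest_phys_addr changes, the host backing mapping stays put (move_test_slot is hypothetical):

    #include "kvm_util.h"

    /* Re-base slot 1 at a new guest-physical address. */
    static void move_test_slot(struct kvm_vm *vm)
    {
            vm_mem_region_move(vm, 1, 0x20000000);
    }
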
1055 * VM Memory Region Delete
1059 * slot - Slot of the memory region to delete
1065 * Delete a memory region.
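
The public delete wrapper ends in the memory_size = 0 ioctl idiom shown in __vm_mem_region_delete() above; a short usage sketch (drop_test_slot is hypothetical):

    #include "kvm_util.h"

    /* Remove slot 1 from the VM entirely. */
    static void drop_test_slot(struct kvm_vm *vm)
    {
            vm_mem_region_delete(vm, 1);
    }
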
1241 * data_memslot - Memory region slot for data pages
1242 * pgd_memslot - Memory region slot for new virtual translation tables
1328 * pgd_memslot - Memory region slot for new virtual translation tables
1365 * Locates the memory region containing the VM physical address given
1368 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1372 struct userspace_mem_region *region; in addr_gpa2hva() local
1374 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1375 if (!region) { in addr_gpa2hva()
1380 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1381 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
1396 * Locates the memory region containing the host virtual address given
1399 * region containing hva exists.
1406 struct userspace_mem_region *region = in addr_hva2gpa() local
1409 if (hva >= region->host_mem) { in addr_hva2gpa()
1410 if (hva <= (region->host_mem in addr_hva2gpa()
1411 + region->region.memory_size - 1)) in addr_hva2gpa()
1413 region->region.guest_phys_addr in addr_hva2gpa()
1414 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
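
Within any mapped region the two translations are inverses, which makes for a cheap self-check in tests; a sketch assuming gpa lies inside a mapped region (check_round_trip is hypothetical):

    #include "kvm_util.h"
    #include "test_util.h"

    static void check_round_trip(struct kvm_vm *vm, vm_paddr_t gpa)
    {
            uint64_t *host = addr_gpa2hva(vm, gpa);

            *host = 0xdeadbeef; /* touch guest memory via the host mapping */
            TEST_ASSERT(addr_hva2gpa(vm, host) == gpa,
                        "hva -> gpa did not invert gpa -> hva");
    }
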
1447 struct userspace_mem_region *region; in addr_gpa2alias() local
1450 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1451 if (!region) in addr_gpa2alias()
1454 if (!region->host_alias) in addr_gpa2alias()
1457 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1458 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
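
Because host_alias is a second mapping of the same pages (present only for shared, memfd-backed regions, hence the NULL checks above), a store through one mapping must be visible through the other. A hedged sketch (check_alias is hypothetical):

    #include "kvm_util.h"
    #include "test_util.h"

    static void check_alias(struct kvm_vm *vm, vm_paddr_t gpa)
    {
            uint8_t *direct = addr_gpa2hva(vm, gpa);
            uint8_t *alias  = addr_gpa2alias(vm, gpa); /* NULL w/o alias */

            *direct = 0x55;
            TEST_ASSERT(alias && *alias == 0x55,
                        "alias does not mirror guest memory");
    }
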
2061 struct userspace_mem_region *region; in vm_dump() local
2068 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
2071 (uint64_t) region->region.guest_phys_addr, in vm_dump()
2072 (uint64_t) region->region.memory_size, in vm_dump()
2073 region->host_mem); in vm_dump()
2075 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
2159 * memslot - Memory region to allocate page from
2174 struct userspace_mem_region *region; in vm_phy_pages_alloc() local
2184 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
2189 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in vm_phy_pages_alloc()
2190 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
2206 sparsebit_clear(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
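
Free pages are tracked per slot in the unused_phy_pages sparsebit, as the last matches show; allocation clears the chosen bits. A usage sketch assuming the usual prototype of vm, page count, minimum paddr, memslot (alloc_test_pages is hypothetical):

    #include "kvm_util.h"

    /* Grab 4 contiguous guest-physical pages from slot 0,
     * searching upward from 0x10000. */
    static vm_paddr_t alloc_test_pages(struct kvm_vm *vm)
    {
            return vm_phy_pages_alloc(vm, 4, 0x10000, 0);
    }
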