Searched refs:guest_phys_addr (Results 1 – 12 of 12) sorted by relevance
428 region->region.guest_phys_addr, in kvm_vm_restart()
473 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
474 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
725 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
726 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
729 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
730 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
773 .guest_phys_addr = gpa, in __vm_set_user_memory_region()
850 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
865 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
[all …]
98 __u64 guest_phys_addr; member
93 __u64 guest_phys_addr; member
101 __u64 guest_phys_addr; member
59 __u64 guest_phys_addr; member
147 dev->mem->regions[0].guest_phys_addr = (long)dev->buf; in vdev_info_init()
526 i = (region->region.guest_phys_addr >> vm->page_shift) - 1; in nested_map_memslot()
1942 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) in __kvm_set_memory_region()
1952 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) in __kvm_set_memory_region()
1975 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); in __kvm_set_memory_region()
1467 region->guest_phys_addr, in vhost_set_memory()
1468 region->guest_phys_addr + in vhost_set_memory()
1330 __u64 guest_phys_addr;
1364 It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
12521 m.guest_phys_addr = gpa; in __x86_set_memory_region()