Lines matching full:vm in tools/testing/selftests/kvm/lib/x86_64/vmx.c, the
KVM selftests nested-VMX helper library.

/*
 * vm - The VM to allocate guest-virtual addresses in.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
        vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
        struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

        /* Setup of a region of guest memory for the vmxon region. */
        vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
        vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

        /* Setup of a region of guest memory for a vmcs. */
        vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
        vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

        /* Setup of a region of guest memory for the MSR bitmap. */
        vmx->msr = (void *)vm_vaddr_alloc_page(vm);
        vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
        vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);

        /* Setup of a region of guest memory for the shadow VMCS. */
        vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
        vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);

        /* Setup of the VMREAD and VMWRITE bitmaps. */
        vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
        vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);

        vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
        vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);

        /* Setup of the Hyper-V VP Assist page. */
        vmx->vp_assist = (void *)vm_vaddr_alloc_page(vm);
        vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
        vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);

        /* Setup of the Hyper-V enlightened VMCS. */
        vmx->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->enlightened_vmcs_hva =
                addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
        vmx->enlightened_vmcs_gpa =
                addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);

        *p_vmx_gva = vmx_gva;
        return vmx;
}
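
For context, a minimal sketch of how a test typically consumes this helper,
assuming the current selftest API (vm_create_with_one_vcpu(), vcpu_args_set());
guest_code stands in for a test's guest entry point:

        vm_vaddr_t vmx_pages_gva;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;

        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
        vcpu_alloc_vmx(vm, &vmx_pages_gva);
        vcpu_args_set(vcpu, 1, vmx_pages_gva);  /* guest code receives the gva */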

static void nested_create_pte(struct kvm_vm *vm, struct eptPageTableEntry *pte,
                              uint64_t nested_paddr, uint64_t paddr,
                              int current_level, int target_level)
        ...
                if (pte->page_size)
                        pte->address = paddr >> vm->page_shift;
                else
                        pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
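
        /*
         * Illustration only (hypothetical numbers, 4 KiB pages so
         * vm->page_shift == 12): pte->address holds a page frame number,
         * not a byte address, e.g. 0x1234000 >> 12 == 0x1234, and
         * 0x1234 << 12 recovers the byte address.
         */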

void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                     uint64_t nested_paddr, uint64_t paddr, int target_level)
{
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
                    "Attempt to use unknown or unsupported guest mode, mode: 0x%x",
                    vm->mode);

        TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
                    "Nested physical address beyond maximum supported,\n"
                    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    nested_paddr, vm->max_gfn, vm->page_size);

        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                    "Physical address beyond maximum supported,\n"
                    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    paddr, vm->max_gfn, vm->page_size);
        ...
                /* Per level: create or reuse the entry, then descend into
                 * the page table that the entry references. */
                nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
        ...
                pt = addr_gpa2hva(vm, pte->address * vm->page_size);
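
To make the walk concrete, a standalone sketch (illustration only, not from
vmx.c) of how a 48-bit guest-physical address splits into the 9-bit per-level
indices a 4-level EPT walk consumes; the shifts are standard x86 paging
constants:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t nested_paddr = 0x0000123456789000ULL; /* hypothetical */

                /* Four levels of 9 bits each sit above the 12-bit page offset. */
                for (int shift = 39; shift >= 12; shift -= 9)
                        printf("index at bit %2d: 0x%03llx\n", shift,
                               (unsigned long long)((nested_paddr >> shift) & 0x1ff));
                return 0;
        }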

void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr)
{
        __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
}

/*
 * Map a range of EPT guest physical addresses to the VM's physical address.
 *
 *   vm           - Virtual Machine
 *   nested_paddr - Nested guest physical address to map
 *   paddr        - VM Physical Address
 *   size         - Size of the range to map
 *
 * Within the VM given by vm, creates a nested guest translation for the
 * page range starting at nested_paddr to the page range starting at paddr.
 */
void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint64_t nested_paddr, uint64_t paddr, uint64_t size,
                  int level)
        ...
                __nested_pg_map(vmx, vm, nested_paddr, paddr, level); /* per page */

void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                uint64_t nested_paddr, uint64_t paddr, uint64_t size)
{
        __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
}
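
A hedged usage sketch, continuing from a test that called vcpu_alloc_vmx() as
above: prepare_eptp() (shown further down) allocates the EPT root, after which
a buffer can be mirrored into the nested guest at identical addresses;
guest_paddr and the two-page size are made-up values:

        prepare_eptp(vmx, vm, 0);
        nested_map(vmx, vm, guest_paddr, guest_paddr, 2 * 4096);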

/*
 * Prepare an identity extended page table that maps all the
 * physical pages in VM.
 */
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
                        uint32_t memslot)
{
        struct userspace_mem_region *region =
                memslot2region(vm, memslot);

        /* First gfn of the slot, biased by -1 for the sparsebit iterator. */
        i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
        last = i + (region->region.memory_size >> vm->page_shift);
        ...
                /* Identity map: nested gpa == gpa, one 4K page at a time. */
                nested_map(vmx, vm,
                           (uint64_t)i << vm->page_shift,
                           (uint64_t)i << vm->page_shift,
                           1 << vm->page_shift);
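
Tests that want L2 to see the same physical layout as L1 typically
identity-map all of memslot 0 with this helper:

        nested_map_memslot(vmx, vm, 0);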

void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
                            uint64_t addr, uint64_t size)
{
        __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
}

bool kvm_vm_has_ept(struct kvm_vm *vm)
{
        struct kvm_vcpu *vcpu =
                list_first_entry(&vm->vcpus, struct kvm_vcpu, list);

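        /*
         * Remainder reconstructed as a sketch (these lines contain no "vm",
         * so the search omitted them): EPT support is advertised through the
         * VMX capability MSRs, read via the first vCPU.
         */
        uint64_t ctrl;

        ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
        if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
                return false;

        ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
        return ctrl & SECONDARY_EXEC_ENABLE_EPT;
}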

void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot)
{
        TEST_REQUIRE(kvm_vm_has_ept(vm));

        vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
        vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
        vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}

void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
        vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
        vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
        vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}