/Linux-v5.10/tools/testing/selftests/kvm/lib/x86_64/ |
D | vmx.c |
    81  struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);  in vcpu_alloc_vmx() local
    84  vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);  in vcpu_alloc_vmx()
    85  vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);  in vcpu_alloc_vmx()
    86  vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);  in vcpu_alloc_vmx()
    89  vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);  in vcpu_alloc_vmx()
    90  vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);  in vcpu_alloc_vmx()
    91  vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);  in vcpu_alloc_vmx()
    94  vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);  in vcpu_alloc_vmx()
    95  vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);  in vcpu_alloc_vmx()
    96  vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);  in vcpu_alloc_vmx()
    [all …]
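A caller-side note: vcpu_alloc_vmx() returns the host mapping of the vmx_pages structure and reports its guest virtual address through the out-parameter. A minimal sketch of how the x86_64 tests use it, assuming the usual selftest setup (vm already created, VCPU_ID being the test's own vCPU id constant):

    /* Sketch: allocate the nested-VMX tracking pages and pass their GVA to L1. */
    vm_vaddr_t vmx_pages_gva;
    struct vmx_pages *vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);

    /* The guest dereferences the GVA itself; host-side code keeps using the
     * hva/gpa aliases stored in *vmx (vmxon_hva, vmcs_gpa, ...). */
    vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);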
|
/Linux-v5.10/arch/x86/kvm/vmx/ |
D | vmx.c |
    449  static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)  in vmx_segment_cache_clear() argument
    451  vmx->segment_cache.bitmask = 0;  in vmx_segment_cache_clear()
    672  static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)  in __vmx_find_uret_msr() argument
    676  for (i = 0; i < vmx->nr_uret_msrs; ++i)  in __vmx_find_uret_msr()
    677  if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)  in __vmx_find_uret_msr()
    682  struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)  in vmx_find_uret_msr() argument
    686  i = __vmx_find_uret_msr(vmx, msr);  in vmx_find_uret_msr()
    688  return &vmx->guest_uret_msrs[i];  in vmx_find_uret_msr()
    692  static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,  in vmx_set_guest_uret_msr() argument
    699  if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {  in vmx_set_guest_uret_msr()
    [all …]
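vmx_find_uret_msr() returns a pointer into vmx->guest_uret_msrs (NULL when the index is not in the user-return list), and vmx_set_guest_uret_msr() takes that pointer plus the new value. A hedged, non-standalone fragment of how a WRMSR path inside vmx.c might combine the two; MSR_TSC_AUX is only an illustrative index, and 'data'/'ret' come from the surrounding handler:

    /* Kernel-context sketch: update the shadowed value of a user-return MSR
     * if this vCPU actually exposes it. */
    struct vmx_uret_msr *uret = vmx_find_uret_msr(vmx, MSR_TSC_AUX);
    if (uret)
    	ret = vmx_set_guest_uret_msr(vmx, uret, data);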
|
D | nested.c |
    188  struct vcpu_vmx *vmx = to_vmx(vcpu);  in nested_vmx_fail() local
    194  if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)  in nested_vmx_fail()
    217  static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)  in vmx_disable_shadow_vmcs() argument
    219  secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);  in vmx_disable_shadow_vmcs()
    221  vmx->nested.need_vmcs12_to_shadow_sync = false;  in vmx_disable_shadow_vmcs()
    226  struct vcpu_vmx *vmx = to_vmx(vcpu);  in nested_release_evmcs() local
    228  if (!vmx->nested.hv_evmcs)  in nested_release_evmcs()
    231  kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);  in nested_release_evmcs()
    232  vmx->nested.hv_evmcs_vmptr = 0;  in nested_release_evmcs()
    233  vmx->nested.hv_evmcs = NULL;  in nested_release_evmcs()
    [all …]
|
D | vmx.h |
    313  void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
    325  void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
    339  struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
    341  void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
    351  static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
    353  if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
    355  vmx->loaded_vmcs->controls_shadow.lname = val; \
    358  static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
    360  return vmx->loaded_vmcs->controls_shadow.lname; \
    362  static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
    [all …]
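The lname##_controls_* helpers above are stamped out by the BUILD_CONTROLS_SHADOW() macro for each VMCS control field (pin, exec, secondary_exec, vm_entry, vm_exit); they cache the last value written so redundant VMWRITEs can be skipped. A hedged reconstruction of the setbit/clearbit pair that the truncated excerpt cuts off (assumed shape, following the _set/_get helpers shown above):

    static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
    {                                                                         \
    	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);          \
    }                                                                         \
    static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
    {                                                                         \
    	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);         \
    }

The secondary_exec instance of the clearbit helper is what nested.c's vmx_disable_shadow_vmcs() calls in the entry above.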
|
D | nested.h |
    57  struct vcpu_vmx *vmx = to_vmx(vcpu);  in vmx_has_valid_vmcs12() local
    65  return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||  in vmx_has_valid_vmcs12()
    66  vmx->nested.hv_evmcs;  in vmx_has_valid_vmcs12()
    71  struct vcpu_vmx *vmx = to_vmx(vcpu);  in nested_get_vpid02() local
    73  return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;  in nested_get_vpid02()
|
D | evmcs.c |
    329  struct vcpu_vmx *vmx = to_vmx(vcpu);  in nested_get_evmcs_version() local
    337  vmx->nested.enlightened_vmcs_enabled)  in nested_get_evmcs_version()
    424  struct vcpu_vmx *vmx = to_vmx(vcpu);  in nested_enable_evmcs() local
    426  vmx->nested.enlightened_vmcs_enabled = true;  in nested_enable_evmcs()
|
/Linux-v5.10/tools/testing/selftests/kvm/x86_64/ |
D | vmx_set_nested_state_test.c |
    83  state->hdr.vmx.vmxon_pa = 0x1000;  in set_default_vmx_state()
    84  state->hdr.vmx.vmcs12_pa = 0x2000;  in set_default_vmx_state()
    85  state->hdr.vmx.smm.flags = 0;  in set_default_vmx_state()
    114  state->hdr.vmx.vmxon_pa = -1ull;  in test_vmx_nested_state()
    117  state->hdr.vmx.vmcs12_pa = -1ull;  in test_vmx_nested_state()
    134  state->hdr.vmx.vmxon_pa = -1ull;  in test_vmx_nested_state()
    135  state->hdr.vmx.vmcs12_pa = -1ull;  in test_vmx_nested_state()
    146  state->hdr.vmx.smm.flags = 1;  in test_vmx_nested_state()
    151  state->hdr.vmx.flags = ~0;  in test_vmx_nested_state()
    156  state->hdr.vmx.vmxon_pa = -1ull;  in test_vmx_nested_state()
    [all …]
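The test exercises KVM_SET_NESTED_STATE by filling the hdr.vmx portion of struct kvm_nested_state (see the uapi kvm.h entry at the end of this listing). A hedged user-space sketch of the core pattern, assuming an already-created vCPU fd and leaving out the many error-path variations the real test walks through; whether the kernel accepts the blob still depends on the vCPU being configured for nested VMX:

    /* Needs <linux/kvm.h>, <sys/ioctl.h>, <stdio.h>, <stdlib.h>. */
    int size = sizeof(struct kvm_nested_state) + KVM_STATE_NESTED_VMX_VMCS12_SIZE;
    struct kvm_nested_state *state = calloc(1, size);

    state->format = KVM_STATE_NESTED_FORMAT_VMX;
    state->size = size;
    state->hdr.vmx.vmxon_pa = 0x1000;	/* guest-physical VMXON region */
    state->hdr.vmx.vmcs12_pa = 0x2000;	/* guest-physical current VMCS12 */
    state->hdr.vmx.smm.flags = 0;

    if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state) < 0)
    	perror("KVM_SET_NESTED_STATE");
    free(state);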
|
D | vmx_dirty_log_test.c |
    50  void l1_guest_code(struct vmx_pages *vmx)  in l1_guest_code() argument
    55  GUEST_ASSERT(vmx->vmcs_gpa);  in l1_guest_code()
    56  GUEST_ASSERT(prepare_for_vmx_operation(vmx));  in l1_guest_code()
    57  GUEST_ASSERT(load_vmcs(vmx));  in l1_guest_code()
    59  prepare_vmcs(vmx, l2_guest_code,  in l1_guest_code()
    72  struct vmx_pages *vmx;  in main() local
    86  vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);  in main()
    110  prepare_eptp(vmx, vm, 0);  in main()
    111  nested_map_memslot(vmx, vm, 0, 0);  in main()
    112  nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);  in main()
    [all …]
|
D | vmx_apic_access_test.c |
    84  struct vmx_pages *vmx;  in main() local
    99  vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);  in main()
    100  prepare_virtualize_apic_accesses(vmx, vm, 0);  in main()
|
/Linux-v5.10/tools/testing/selftests/powerpc/ptrace/ |
D | ptrace-vsx.h |
    31  int validate_vmx(unsigned long vmx[][2], unsigned long *load)  in validate_vmx()
    37  if ((vmx[i][0] != load[64 + 2 * i]) ||  in validate_vmx()
    38  (vmx[i][1] != load[65 + 2 * i])) {  in validate_vmx()
    40  i, vmx[i][0], 64 + 2 * i,  in validate_vmx()
    43  i, vmx[i][1], 65 + 2 * i,  in validate_vmx()
    51  if ((vmx[i][0] != load[65 + 2 * i]) ||  in validate_vmx()
    52  (vmx[i][1] != load[64 + 2 * i])) {  in validate_vmx()
    54  i, vmx[i][0], 65 + 2 * i,  in validate_vmx()
    57  i, vmx[i][1], 64 + 2 * i,  in validate_vmx()
    109  unsigned long vmx[][2])  in load_vsx_vmx()
    [all …]
|
D | ptrace-tm-vsx.c |
    87  unsigned long vmx[VMX_MAX + 2][2];  in trace_tm_vsx() local
    92  FAIL_IF(show_vmx(child, vmx));  in trace_tm_vsx()
    93  FAIL_IF(validate_vmx(vmx, fp_load));  in trace_tm_vsx()
    96  FAIL_IF(show_vmx_ckpt(child, vmx));  in trace_tm_vsx()
    97  FAIL_IF(validate_vmx(vmx, fp_load_ckpt));  in trace_tm_vsx()
    99  memset(vmx, 0, sizeof(vmx));  in trace_tm_vsx()
    101  load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);  in trace_tm_vsx()
    104  FAIL_IF(write_vmx_ckpt(child, vmx));  in trace_tm_vsx()
|
D | ptrace-tm-spd-vsx.c |
    99  unsigned long vmx[VMX_MAX + 2][2];  in trace_tm_spd_vsx() local
    104  FAIL_IF(show_vmx(child, vmx));  in trace_tm_spd_vsx()
    105  FAIL_IF(validate_vmx(vmx, fp_load));  in trace_tm_spd_vsx()
    108  FAIL_IF(show_vmx_ckpt(child, vmx));  in trace_tm_spd_vsx()
    109  FAIL_IF(validate_vmx(vmx, fp_load_ckpt));  in trace_tm_spd_vsx()
    112  memset(vmx, 0, sizeof(vmx));  in trace_tm_spd_vsx()
    114  load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);  in trace_tm_spd_vsx()
    117  FAIL_IF(write_vmx_ckpt(child, vmx));  in trace_tm_spd_vsx()
|
D | ptrace-vsx.c |
    40  unsigned long vmx[VMX_MAX + 2][2];  in trace_vsx() local
    45  FAIL_IF(show_vmx(child, vmx));  in trace_vsx()
    46  FAIL_IF(validate_vmx(vmx, fp_load));  in trace_vsx()
    49  memset(vmx, 0, sizeof(vmx));  in trace_vsx()
    50  load_vsx_vmx(fp_load_new, vsx, vmx);  in trace_vsx()
    53  FAIL_IF(write_vmx(child, vmx));  in trace_vsx()
|
D | ptrace.h |
    529  int show_vmx(pid_t child, unsigned long vmx[][2])  in show_vmx()
    533  ret = ptrace(PTRACE_GETVRREGS, child, 0, vmx);  in show_vmx()
    541  int show_vmx_ckpt(pid_t child, unsigned long vmx[][2])  in show_vmx_ckpt()
    554  memcpy(vmx, regs, sizeof(regs));  in show_vmx_ckpt()
    559  int write_vmx(pid_t child, unsigned long vmx[][2])  in write_vmx()
    563  ret = ptrace(PTRACE_SETVRREGS, child, 0, vmx);  in write_vmx()
    571  int write_vmx_ckpt(pid_t child, unsigned long vmx[][2])  in write_vmx_ckpt()
    577  memcpy(regs, vmx, sizeof(regs));  in write_vmx_ckpt()
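show_vmx() and write_vmx() are thin wrappers around the powerpc PTRACE_GETVRREGS / PTRACE_SETVRREGS requests, which transfer the 32 VMX (AltiVec) vector registers plus VSCR and VRSAVE in one block. A hedged standalone sketch of the read side, assuming the powerpc <sys/ptrace.h> exposes the request constant; the 34-row buffer mirrors the VMX_MAX + 2 arrays used by the tests above:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* Read a stopped, ptrace-attached child's VMX/AltiVec register block. */
    static int dump_vmx(pid_t child)
    {
    	unsigned long vmx[34][2];	/* 32 VRs + VSCR + VRSAVE rows */

    	if (ptrace(PTRACE_GETVRREGS, child, 0, vmx) == -1) {
    		perror("PTRACE_GETVRREGS");
    		return -1;
    	}
    	printf("vr0 = %#lx %#lx\n", vmx[0][0], vmx[0][1]);
    	return 0;
    }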
|
/Linux-v5.10/arch/x86/kvm/ |
D | Makefile |
    21  kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
    22  	vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
|
/Linux-v5.10/tools/testing/selftests/kvm/include/x86_64/ |
D | vmx.h |
    606  bool prepare_for_vmx_operation(struct vmx_pages *vmx);
    607  void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
    608  bool load_vmcs(struct vmx_pages *vmx);
    613  void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
    615  void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
    618  void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
    620  void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
    622  void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
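Together these helpers cover the boilerplate every nested-VMX selftest repeats: the host allocates vmx_pages, then the L1 guest enters VMX operation and launches L2. A condensed guest-side sketch modelled on the vmx_dirty_log_test.c excerpt earlier in this listing (l2_guest_code and the stack size are the test's own choices):

    /* L1 guest sketch: enter VMX operation and hand control to L2. */
    void l1_guest_code(struct vmx_pages *vmx)
    {
    	unsigned long l2_guest_stack[128];

    	GUEST_ASSERT(vmx->vmcs_gpa);			/* pages were allocated */
    	GUEST_ASSERT(prepare_for_vmx_operation(vmx));	/* CR4.VMXE + VMXON */
    	GUEST_ASSERT(load_vmcs(vmx));			/* VMCLEAR + VMPTRLD */

    	/* Point the VMCS at L2's entry point and stack top, then launch. */
    	prepare_vmcs(vmx, l2_guest_code, &l2_guest_stack[128]);
    	GUEST_ASSERT(!vmlaunch());
    }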
|
/Linux-v5.10/drivers/crypto/vmx/ |
D | Makefile |
    2  obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
    3  vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
|
D | Kconfig | 10 choose 'M' here, this module will be called vmx-crypto.
|
/Linux-v5.10/tools/testing/selftests/powerpc/tm/ |
D | Makefile |
    3  tm-signal-context-chk-vmx tm-signal-context-chk-vsx
    6  tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
    22  $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
|
D | .gitignore |
    13  tm-signal-context-chk-vmx
    18  tm-vmx-unavail
|
/Linux-v5.10/Documentation/virt/kvm/ |
D | index.rst | 18 nested-vmx
|
D | nested-vmx.rst |
    49  - cpu qemu64,+vmx (add just the vmx feature to a named CPU type)
    69  internals of this structure; This is struct vmcs12 from arch/x86/kvm/vmx.c.
    78  VMCS12_REVISION (from vmx.c) should be changed if struct vmcs12 or its inner
|
/Linux-v5.10/arch/x86/events/intel/ |
D | pt.h | 46 bool vmx; member
|
/Linux-v5.10/arch/powerpc/lib/ |
D | Makefile | 48 obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
|
/Linux-v5.10/arch/x86/include/uapi/asm/ |
D | kvm.h |
    458  struct kvm_vmx_nested_state_hdr vmx;  member
    471  struct kvm_vmx_nested_state_data vmx[0];  member
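For context, the two matches are the VMX arms of the header and data unions inside struct kvm_nested_state. A hedged reconstruction of the surrounding layout (field order per the 5.10 UAPI as best recalled; the pad keeps the header union at 120 bytes):

    struct kvm_nested_state {
    	__u16 flags;
    	__u16 format;
    	__u32 size;

    	union {
    		struct kvm_vmx_nested_state_hdr vmx;
    		struct kvm_svm_nested_state_hdr svm;
    		__u8 pad[120];
    	} hdr;

    	union {
    		struct kvm_vmx_nested_state_data vmx[0];
    		struct kvm_svm_nested_state_data svm[0];
    	} data;
    };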
|