/Linux-v5.4/arch/x86/include/asm/ |
D | virtext.h |
      112  uint64_t efer;  in cpu_svm_disable() local
      115  rdmsrl(MSR_EFER, efer);  in cpu_svm_disable()
      116  wrmsrl(MSR_EFER, efer & ~EFER_SVME);  in cpu_svm_disable()
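The virtext.h entry shows the canonical read-modify-write pattern on the EFER MSR: cpu_svm_disable() reads it, clears EFER.SVME, and writes it back to turn AMD SVM off on the current CPU. Below is a minimal, self-contained sketch of the same bit manipulation; rdmsr_demo(), wrmsr_demo(), fake_efer and demo_svm_disable() are illustrative stand-ins, not kernel APIs (the kernel uses rdmsrl()/wrmsrl() on the real MSR).

    /* Sketch only: mirrors the read-modify-write done by cpu_svm_disable(),
     * with the MSR access stubbed out by a plain variable. */
    #include <stdint.h>

    #define MSR_EFER   0xc0000080U     /* architectural MSR number of EFER */
    #define EFER_SVME  (1ULL << 12)    /* AMD Secure Virtual Machine enable */

    static uint64_t fake_efer = EFER_SVME;   /* pretend SVM is currently enabled */

    static uint64_t rdmsr_demo(uint32_t msr) { (void)msr; return fake_efer; }
    static void wrmsr_demo(uint32_t msr, uint64_t val) { (void)msr; fake_efer = val; }

    static void demo_svm_disable(void)
    {
        uint64_t efer = rdmsr_demo(MSR_EFER);

        wrmsr_demo(MSR_EFER, efer & ~EFER_SVME);   /* clear SVME, keep everything else */
    }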
|
D | suspend_64.h | 41 unsigned long efer; member
|
D | realmode.h | 48 u64 efer;
|
D | svm.h | 175 u64 efer; member
|
D | kvm_host.h |
      555  u64 efer;  member
      1049  void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
      1359  bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
|
/Linux-v5.4/arch/x86/realmode/ |
D | init.c |
      50  u64 efer;  in setup_real_mode() local
      97  rdmsrl(MSR_EFER, efer);  in setup_real_mode()
      98  trampoline_header->efer = efer & ~EFER_LMA;  in setup_real_mode()
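setup_real_mode() copies the boot CPU's EFER into the real-mode trampoline header, but only after clearing EFER.LMA: LMA is a status bit that the CPU sets by itself once long mode is actually entered, so a CPU coming up through the trampoline must not see it pre-set. A small sketch of that masking follows; struct demo_trampoline_header and demo_setup_real_mode() are illustrative names, not the kernel's real-mode header layout.

    #include <stdint.h>

    #define EFER_LMA (1ULL << 10)   /* long mode active: set by hardware, never by software */

    struct demo_trampoline_header {
        uint64_t efer;              /* value the trampoline programs before paging is on */
    };

    static void demo_setup_real_mode(struct demo_trampoline_header *th, uint64_t boot_cpu_efer)
    {
        /* Strip LMA; the CPU raises it itself once LME + paging take effect. */
        th->efer = boot_cpu_efer & ~EFER_LMA;
    }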
|
/Linux-v5.4/include/xen/interface/hvm/ |
D | hvm_vcpu.h |
      52  uint64_t efer;  member
      106  uint64_t efer;  member
|
/Linux-v5.4/arch/x86/kvm/ |
D | emulate.c |
      809  u64 efer = 0;  in assign_eip_far() local
      811  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);  in assign_eip_far()
      812  if (efer & EFER_LMA)  in assign_eip_far()
      1568  u64 efer = 0;  in get_descriptor_ptr() local
      1570  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);  in get_descriptor_ptr()
      1571  if (!(efer & EFER_LMA))  in get_descriptor_ptr()
      1719  u64 efer = 0;  in __load_segment_descriptor() local
      1721  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);  in __load_segment_descriptor()
      1722  if (efer & EFER_LMA)  in __load_segment_descriptor()
      2584  unsigned long cr0, cr4, efer;  in em_rsm() local
      [all …]
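The emulate.c hits all follow one shape: the instruction emulator does not touch vCPU state directly, so it pulls EFER through the ctxt->ops->get_msr() callback and then branches on EFER.LMA to decide whether 64-bit semantics apply (far control transfers in assign_eip_far(), descriptor handling in get_descriptor_ptr() and __load_segment_descriptor()). A hedged sketch of that callback-plus-test pattern; get_guest_msr_t and demo_far_target_may_be_64bit() are made-up names, not the emulator's real interface.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_EFER  0xc0000080U
    #define EFER_LMA  (1ULL << 10)

    /* Stand-in for the ctxt->ops->get_msr(ctxt, msr, &data) callback. */
    typedef int (*get_guest_msr_t)(void *ctxt, uint32_t msr, uint64_t *data);

    static bool demo_far_target_may_be_64bit(void *ctxt, get_guest_msr_t get_msr)
    {
        uint64_t efer = 0;

        get_msr(ctxt, MSR_EFER, &efer);
        /* Only a guest with EFER.LMA set (IA-32e mode) may use 64-bit targets. */
        return (efer & EFER_LMA) != 0;
    }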
|
D | x86.h |
      87  return vcpu->arch.efer & EFER_LMA;  in is_long_mode()
      106  return (vcpu->arch.efer & EFER_LMA) &&  in is_la57_mode()
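is_long_mode() and is_la57_mode() are thin predicates over the cached vcpu->arch.efer. For reference, here is a self-contained sketch with the architectural EFER bit positions spelled out; the bit names mirror arch/x86/include/asm/msr-index.h, while the demo_* helpers and the plain uint64_t parameters are illustrative simplifications.

    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_SCE   (1ULL << 0)    /* SYSCALL/SYSRET enable */
    #define EFER_LME   (1ULL << 8)    /* long mode enable (set by software) */
    #define EFER_LMA   (1ULL << 10)   /* long mode active (set by hardware) */
    #define EFER_NX    (1ULL << 11)   /* no-execute page protection enable */
    #define EFER_SVME  (1ULL << 12)   /* AMD SVM enable */
    #define EFER_FFXSR (1ULL << 14)   /* fast FXSAVE/FXRSTOR */

    #define CR4_LA57   (1ULL << 12)   /* 5-level paging enable in CR4 */

    /* Mirrors is_long_mode(): 64-bit (IA-32e) mode is active iff EFER.LMA is set. */
    static bool demo_is_long_mode(uint64_t efer)
    {
        return (efer & EFER_LMA) != 0;
    }

    /* Mirrors is_la57_mode(): 57-bit linear addresses need long mode plus CR4.LA57. */
    static bool demo_is_la57_mode(uint64_t efer, uint64_t cr4)
    {
        return demo_is_long_mode(efer) && (cr4 & CR4_LA57);
    }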
|
D | svm.c |
      734  static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)  in svm_set_efer() argument
      736  vcpu->arch.efer = efer;  in svm_set_efer()
      740  efer |= EFER_NX;  in svm_set_efer()
      742  if (!(efer & EFER_LMA))  in svm_set_efer()
      743  efer &= ~EFER_LME;  in svm_set_efer()
      746  to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;  in svm_set_efer()
      913  uint64_t efer;  in svm_hardware_enable() local
      917  rdmsrl(MSR_EFER, efer);  in svm_hardware_enable()
      918  if (efer & EFER_SVME)  in svm_hardware_enable()
      939  wrmsrl(MSR_EFER, efer | EFER_SVME);  in svm_hardware_enable()
      [all …]
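The svm.c hits cover both sides of SVM's EFER handling: svm_hardware_enable() sets EFER.SVME on the host CPU so VMRUN becomes legal, while svm_set_efer() records what the guest wrote but adjusts the value that actually lands in the VMCB, keeping SVME forced on and dropping LME when LMA is clear. A sketch of that guest-side adjustment; struct demo_vmcb_save, demo_guest_efer and demo_svm_set_efer() are illustrative, not the kernel's structures.

    #include <stdint.h>

    #define EFER_LME   (1ULL << 8)
    #define EFER_LMA   (1ULL << 10)
    #define EFER_SVME  (1ULL << 12)

    struct demo_vmcb_save {
        uint64_t efer;                     /* what the hardware will load for the guest */
    };

    static uint64_t demo_guest_efer;       /* what the guest believes it wrote */

    static void demo_svm_set_efer(struct demo_vmcb_save *save, uint64_t efer)
    {
        demo_guest_efer = efer;            /* remembered for later MSR reads */

        if (!(efer & EFER_LMA))
            efer &= ~EFER_LME;             /* no LME in the VMCB while long mode is inactive */

        save->efer = efer | EFER_SVME;     /* SVME must stay set while running under SVM */
    }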
|
D | x86.c |
      772  if ((vcpu->arch.efer & EFER_LME)) {  in kvm_set_cr0()
      1368  static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)  in __kvm_valid_efer() argument
      1370  if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))  in __kvm_valid_efer()
      1373  if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))  in __kvm_valid_efer()
      1376  if (efer & (EFER_LME | EFER_LMA) &&  in __kvm_valid_efer()
      1380  if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))  in __kvm_valid_efer()
      1386  bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)  in kvm_valid_efer() argument
      1388  if (efer & efer_reserved_bits)  in kvm_valid_efer()
      1391  return __kvm_valid_efer(vcpu, efer);  in kvm_valid_efer()
      1397  u64 old_efer = vcpu->arch.efer;  in set_efer()
      [all …]
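__kvm_valid_efer() enforces that every feature bit the guest tries to set in EFER is backed by its CPUID (FFXSR, SVME, LME/LMA, NX), and kvm_valid_efer() additionally rejects anything in efer_reserved_bits. A condensed sketch of that validation; demo_guest_has(), the demo_feature enum and demo_valid_efer() are hypothetical stand-ins for guest_cpuid_has() and the X86_FEATURE_* flags.

    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_LME   (1ULL << 8)
    #define EFER_LMA   (1ULL << 10)
    #define EFER_NX    (1ULL << 11)
    #define EFER_SVME  (1ULL << 12)
    #define EFER_FFXSR (1ULL << 14)

    enum demo_feature { DEMO_FXSR_OPT, DEMO_SVM, DEMO_LM, DEMO_NX };

    /* Toy CPUID model: pretend the guest advertises everything except SVM. */
    static bool demo_guest_has(enum demo_feature f)
    {
        return f != DEMO_SVM;
    }

    static bool demo_valid_efer(uint64_t efer, uint64_t reserved_bits)
    {
        if (efer & reserved_bits)                                    /* kvm_valid_efer() part */
            return false;
        if ((efer & EFER_FFXSR) && !demo_guest_has(DEMO_FXSR_OPT))
            return false;
        if ((efer & EFER_SVME) && !demo_guest_has(DEMO_SVM))
            return false;
        if ((efer & (EFER_LME | EFER_LMA)) && !demo_guest_has(DEMO_LM))
            return false;
        if ((efer & EFER_NX) && !demo_guest_has(DEMO_NX))
            return false;
        return true;
    }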
|
D | cpuid.c |
      157  unsigned long long efer = 0;  in is_efer_nx() local
      159  rdmsrl_safe(MSR_EFER, &efer);  in is_efer_nx()
      160  return efer & EFER_NX;  in is_efer_nx()
|
D | mmu.c | 601 return vcpu->arch.efer & EFER_NX; in is_nx()
|
/Linux-v5.4/arch/x86/power/ |
D | cpu.c |
      116  rdmsrl(MSR_EFER, ctxt->efer);  in __save_processor_state()
      207  wrmsrl(MSR_EFER, ctxt->efer);  in __restore_processor_state()
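__save_processor_state() and __restore_processor_state() bracket suspend/resume: MSRs such as EFER do not survive a trip through S3, so the value is captured into the saved context before sleep and written back on wake. Sketched below with a hypothetical demo_saved_context and a plain variable standing in for the MSR.

    #include <stdint.h>

    struct demo_saved_context {
        uint64_t efer;                 /* one of several MSRs preserved across suspend */
    };

    static uint64_t fake_efer_msr;     /* stand-in for the real EFER MSR */

    static void demo_save_processor_state(struct demo_saved_context *ctxt)
    {
        ctxt->efer = fake_efer_msr;    /* kernel: rdmsrl(MSR_EFER, ctxt->efer) */
    }

    static void demo_restore_processor_state(const struct demo_saved_context *ctxt)
    {
        fake_efer_msr = ctxt->efer;    /* kernel: wrmsrl(MSR_EFER, ctxt->efer) */
    }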
|
/Linux-v5.4/arch/x86/include/uapi/asm/ |
D | kvm.h | 155 __u64 efer; member
|
/Linux-v5.4/tools/arch/x86/include/uapi/asm/ |
D | kvm.h | 155 __u64 efer; member
|
/Linux-v5.4/arch/x86/kvm/vmx/ |
D | vmx.c |
      969  u64 guest_efer = vmx->vcpu.arch.efer;  in update_transition_efer()
      993  (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {  in update_transition_efer()
      1668  if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {  in setup_msrs()
      2793  void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)  in vmx_set_efer() argument
      2801  vcpu->arch.efer = efer;  in vmx_set_efer()
      2802  if (efer & EFER_LMA) {  in vmx_set_efer()
      2804  msr->data = efer;  in vmx_set_efer()
      2808  msr->data = efer & ~EFER_LME;  in vmx_set_efer()
      2829  vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);  in enter_lmode()
      2835  vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);  in exit_lmode()
      [all …]
|
D | vmx.h | 320 void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
|
D | nested.c |
      1966  return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);  in nested_vmx_calc_efer()
      1968  return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);  in nested_vmx_calc_efer()
      2402  vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);  in prepare_vmcs02()
      2404  vmx_set_efer(vcpu, vcpu->arch.efer);  in prepare_vmcs02()
      2668  ia32e = !!(vcpu->arch.efer & EFER_LMA);  in nested_vmx_check_host_state()
      3715  vmcs12->guest_ia32_efer = vcpu->arch.efer;  in sync_vmcs02_to_vmcs12()
      3793  vcpu->arch.efer = vmcs12->host_ia32_efer;  in load_vmcs12_host_state()
      3795  vcpu->arch.efer |= (EFER_LMA | EFER_LME);  in load_vmcs12_host_state()
      3797  vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);  in load_vmcs12_host_state()
      3798  vmx_set_efer(vcpu, vcpu->arch.efer);  in load_vmcs12_host_state()
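nested_vmx_calc_efer() decides which EFER value L2 starts with: when the VM-entry controls ask to load IA32_EFER from vmcs12 that image is used directly (that branch is not visible in the hits above), otherwise the current EFER is reused with LMA and LME forced on or off to match the "IA-32e mode guest" entry control; load_vmcs12_host_state() applies the mirror-image logic on VM-exit. A hedged sketch of the calculation; struct demo_vmcs12 and its boolean fields are illustrative simplifications of the VMCS12 layout and its control bits.

    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_LME (1ULL << 8)
    #define EFER_LMA (1ULL << 10)

    struct demo_vmcs12 {
        bool     entry_load_efer;     /* "load IA32_EFER" VM-entry control        */
        bool     entry_ia32e_mode;    /* "IA-32e mode guest" VM-entry control     */
        uint64_t guest_ia32_efer;     /* EFER image supplied by L1 for the guest  */
    };

    static uint64_t demo_calc_l2_efer(const struct demo_vmcs12 *vmcs12, uint64_t current_efer)
    {
        if (vmcs12->entry_load_efer)
            return vmcs12->guest_ia32_efer;                   /* take L1's value as-is */
        if (vmcs12->entry_ia32e_mode)
            return current_efer | (EFER_LMA | EFER_LME);      /* force long mode on    */
        return current_efer & ~(EFER_LMA | EFER_LME);         /* force long mode off   */
    }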
|
/Linux-v5.4/drivers/parport/ |
D | parport_pc.c |
      1110  static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)  in decode_winbond() argument
      1156  efer, key, devid, devrev, oldid, type);  in decode_winbond()
      1159  show_parconfig_winbond(efer, key);  in decode_winbond()
      1162  static void decode_smsc(int efer, int key, int devid, int devrev)  in decode_smsc() argument
      1189  efer, key, devid, devrev, type);  in decode_smsc()
      1192  func(efer, key);  in decode_smsc()
|
/Linux-v5.4/tools/testing/selftests/kvm/lib/x86_64/ |
D | processor.c |
      220  sregs->cr8, sregs->efer, sregs->apic_base);  in sregs_dump()
      627  sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);  in vcpu_setup()
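vcpu_setup() in the selftest library shows the userspace side of the same field: struct kvm_sregs (the __u64 efer member listed under uapi/asm/kvm.h above) is fetched, EFER_LME | EFER_LMA | EFER_NX are OR-ed in so the guest starts directly in 64-bit mode with NX enabled, and the result is pushed back to the kernel. A trimmed userspace sketch, assuming a vCPU file descriptor already obtained from /dev/kvm and omitting the CR0/CR4/segment setup and error handling the real helper also does.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    #define EFER_LME (1ULL << 8)
    #define EFER_LMA (1ULL << 10)
    #define EFER_NX  (1ULL << 11)

    /* Flip an existing vCPU straight into 64-bit mode with NX enabled. */
    static int demo_enable_long_mode(int vcpu_fd)
    {
        struct kvm_sregs sregs;

        if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
            return -1;

        sregs.efer |= EFER_LME | EFER_LMA | EFER_NX;

        return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
    }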
|
/Linux-v5.4/Documentation/virt/kvm/ |
D | mmu.txt |
      165  Contains the value of efer.nxe for which the page is valid.
      330  - mov to cr0/cr4/efer
|
D | api.txt | 423 __u64 efer;
|