/* SPDX-License-Identifier: GPL-2.0-only */
/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
 * VMX functions:
 */

/** Check if the CPU has VMX support */
static inline int cpu_has_vmx(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}


/** Disable VMX on the current CPU
 *
 * vmxoff causes an undefined-opcode exception if vmxon was not run
 * on the CPU previously. Only call this function if you know VMX
 * is enabled.
 */
static inline void cpu_vmxoff(void)
{
	asm volatile ("vmxoff");
	cr4_clear_bits(X86_CR4_VMXE);
}

/** Check whether VMX has been enabled on this CPU (CR4.VMXE set) */
static inline int cpu_vmx_enabled(void)
{
	return __read_cr4() & X86_CR4_VMXE;
}

/** Disable VMX if it is enabled on the current CPU
 *
 * You shouldn't call this if cpu_has_vmx() returns 0.
 */
static inline void __cpu_emergency_vmxoff(void)
{
	if (cpu_vmx_enabled())
		cpu_vmxoff();
}

/** Disable VMX if it is supported and enabled on the current CPU
 */
static inline void cpu_emergency_vmxoff(void)
{
	if (cpu_has_vmx())
		__cpu_emergency_vmxoff();
}


/*
 * SVM functions:
 */

/** Check if the CPU has SVM support
 *
 * You can use the 'msg' arg to get a message describing the problem,
 * if the function returns zero. Simply pass NULL if you are not interested
 * in the messages; gcc should take care of not generating code for
 * the messages in that case.
 */
static inline int cpu_has_svm(const char **msg)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
		if (msg)
			*msg = "not amd or hygon";
		return 0;
	}

	if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
		if (msg)
			*msg = "can't execute cpuid_8000000a";
		return 0;
	}

	if (!boot_cpu_has(X86_FEATURE_SVM)) {
		if (msg)
			*msg = "svm not available";
		return 0;
	}
	return 1;
}


/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
}

/** Make sure SVM is disabled, if it is supported on the CPU
 */
static inline void cpu_emergency_svm_disable(void)
{
	if (cpu_has_svm(NULL))
		cpu_svm_disable();
}

#endif /* _ASM_X86_VIRTEX_H */
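
/*
 * Illustrative usage sketch (not part of this header): a crash/kexec
 * shutdown path would typically force virtualization off on the CPU it
 * is running on before jumping into the new kernel. The function name
 * below is hypothetical and only serves as an example; the
 * cpu_emergency_*() helpers are the ones defined above, and they are
 * safe to call unconditionally because they check for hardware support
 * and enablement themselves.
 *
 *	static void example_emergency_disable_virtualization(void)
 *	{
 *		// Intel VT-x: execute VMXOFF and clear CR4.VMXE if VMX is on.
 *		cpu_emergency_vmxoff();
 *
 *		// AMD-V: clear the host save area and EFER.SVME if SVM is present.
 *		cpu_emergency_svm_disable();
 *	}
 */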