/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_EVMCS_H
#define __KVM_X86_VMX_EVMCS_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

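/*
 * With the enlightened VMCS in use, the per-cpu 'current_vmcs' pointer
 * refers to a struct hv_enlightened_vmcs; current_evmcs is that pointer
 * with the appropriate type.
 */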
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *	POSTED_INTR_NV                  = 0x00000002,
 *	GUEST_INTR_STATUS               = 0x00000810,
 *	APIC_ACCESS_ADDR		= 0x00002014,
 *	POSTED_INTR_DESC_ADDR           = 0x00002016,
 *	EOI_EXIT_BITMAP0                = 0x0000201c,
 *	EOI_EXIT_BITMAP1                = 0x0000201e,
 *	EOI_EXIT_BITMAP2                = 0x00002020,
 *	EOI_EXIT_BITMAP3                = 0x00002022,
 *	GUEST_PML_INDEX			= 0x00000812,
 *	PML_ADDRESS			= 0x0000200e,
 *	VM_FUNCTION_CONTROL             = 0x00002018,
 *	EPTP_LIST_ADDRESS               = 0x00002024,
 *	VMREAD_BITMAP                   = 0x00002026,
 *	VMWRITE_BITMAP                  = 0x00002028,
 *
 *	TSC_MULTIPLIER                  = 0x00002032,
 *	PLE_GAP                         = 0x00004020,
 *	PLE_WINDOW                      = 0x00004022,
 *	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *	GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
 *	HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
 *
 * Currently unsupported in KVM:
 *	GUEST_IA32_RTIT_CTL		= 0x00002814,
 */
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
				    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_2NDEXEC					\
	(SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)

#if IS_ENABLED(CONFIG_HYPERV)

struct evmcs_field {
	u16 offset;
	u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

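/*
 * Translate a VMCS field encoding into the byte offset of the
 * corresponding field in struct hv_enlightened_vmcs.  The lookup table
 * is indexed by the encoding rotated left by 6 bits (ROL16); encodings
 * that eVMCSv1 does not define yield -ENOENT.  When @clean_field is
 * non-NULL it receives the clean-field bit covering the field.
 */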
static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	unsigned int index = ROL16(field, 6);
	const struct evmcs_field *evmcs_field;

	if (unlikely(index >= nr_evmcs_1_fields)) {
		WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
			  field);
		return -ENOENT;
	}

	evmcs_field = &vmcs_field_to_evmcs_1[index];

	if (clean_field)
		*clean_field = evmcs_field->clean_field;

	return evmcs_field->offset;
}

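/*
 * evmcs_write*() mirror vmcs_write*(): the value is stored directly in
 * the in-memory enlightened VMCS and the covering clean-field bit is
 * cleared so that Hyper-V reloads the field on the next VM entry.
 * Writes to fields unknown to eVMCSv1 are silently dropped.
 */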
static inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;

	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

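/*
 * evmcs_read*() mirror vmcs_read*(), reading straight from the
 * in-memory enlightened VMCS; fields unknown to eVMCSv1 read as 0.
 */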
static inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}

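/*
 * Mark the MSR bitmap dirty after KVM has modified it: when the
 * MSR-bitmap enlightenment is in use, Hyper-V only re-reads the bitmap
 * once its clean-field bit has been cleared.
 */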
static inline void evmcs_touch_msr_bitmap(void)
{
	if (unlikely(!current_evmcs))
		return;

	if (current_evmcs->hv_enlightenments_control.msr_bitmap)
		current_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
}

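/*
 * An enlightened VMCS is not loaded with VMPTRLD; instead its physical
 * address is written into the VP assist page and enlighten_vmentry is
 * set so the hypervisor uses it on the next VM entry.  If the eVMCS
 * enables the nested flush hypercall enlightenment, the direct
 * hypercall feature is turned on in the VP assist page as well.
 */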
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

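/*
 * Special values of the enlightened VMCS pointer: EVMPTR_INVALID means
 * no eVMCS is in use, EVMPTR_MAP_PENDING means a GPA is known but the
 * eVMCS page has not been mapped yet; evmptr_is_valid() treats both as
 * "no usable eVMCS".
 */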
#define EVMPTR_INVALID (-1ULL)
#define EVMPTR_MAP_PENDING (-2ULL)

static inline bool evmptr_is_valid(u64 evmptr)
{
	return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
}

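/* Outcome of attempting to map/load the enlightened VMCS on nested VM entry. */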
enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);

#endif /* __KVM_X86_VMX_EVMCS_H */