/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_HYPERV_H
#define __KVM_X86_VMX_HYPERV_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "../hyperv.h"

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

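/*
 * When eVMCS is in use, the per-cpu 'current_vmcs' pointer actually
 * points at the 'struct hv_enlightened_vmcs' loaded on this CPU.
 */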
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

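/* The only eVMCS version KVM supports. */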
#define KVM_EVMCS_VERSION 1

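/*
 * Maps a VMCS12 field to its byte offset in 'struct hv_enlightened_vmcs'
 * and to the "clean field" bit that must be cleared when the field is
 * modified.
 */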
struct evmcs_field {
	u16 offset;
	u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

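/*
 * Translate a VMCS12 field encoding into the field's byte offset in
 * 'struct hv_enlightened_vmcs'. ROL16(field, 6) turns the 16-bit
 * encoding into the same dense table index that vmcs12 uses for its
 * own field-to-offset lookup.
 */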
static __always_inline int evmcs_field_offset(unsigned long field,
					      u16 *clean_field)
{
	unsigned int index = ROL16(field, 6);
	const struct evmcs_field *evmcs_field;

	if (unlikely(index >= nr_evmcs_1_fields))
		return -ENOENT;

	evmcs_field = &vmcs_field_to_evmcs_1[index];

	/*
	 * Use offset=0 to detect holes in eVMCS. This offset belongs to
	 * 'revision_id' but this field has no encoding and is supposed to
	 * be accessed directly.
	 */
	if (unlikely(!evmcs_field->offset))
		return -ENOENT;

	if (clean_field)
		*clean_field = evmcs_field->clean_field;

	return evmcs_field->offset;
}

static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
				 unsigned long field, u16 offset)
{
	/*
	 * vmcs12_read_any() doesn't care whether the supplied structure
	 * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
	 * the exact offset of the required field; use it for convenience
	 * here.
	 */
	return vmcs12_read_any((void *)evmcs, field, offset);
}

#if IS_ENABLED(CONFIG_HYPERV)

DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);

static __always_inline bool kvm_is_using_evmcs(void)
{
	return static_branch_unlikely(&__kvm_is_using_evmcs);
}

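/*
 * Like evmcs_field_offset(), but warn on unsupported fields: when
 * eVMCS is in use, KVM is not expected to access a field that has no
 * eVMCS counterpart.
 */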
static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	int offset = evmcs_field_offset(field, clean_field);

	WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
	return offset;
}

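/*
 * The write helpers below update the field in the current eVMCS and
 * clear the corresponding bit in 'hv_clean_fields' so that Hyper-V
 * reloads the containing field group on the next enlightened VM-entry.
 */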
static __always_inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static __always_inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static __always_inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}

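/*
 * Make the eVMCS active: point the VP assist page at it and request
 * an enlightened VM-entry, so the next VMLAUNCH/VMRESUME uses the
 * eVMCS instead of a VMPTRLD'ed VMCS.
 */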
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static __always_inline bool kvm_is_using_evmcs(void) { return false; }
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

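/*
 * Sentinel values for the eVMCS pointer tracked in nested VMX state
 * ('hv_evmcs_vmptr'): EVMPTR_MAP_PENDING means the guest supplied a
 * valid eVMCS GPA but the page is not currently mapped, e.g. right
 * after migration.
 */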
#define EVMPTR_INVALID (-1ULL)
#define EVMPTR_MAP_PENDING (-2ULL)

static inline bool evmptr_is_valid(u64 evmptr)
{
	return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
}

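/* Possible outcomes of nested_vmx_handle_enlightened_vmptrld(). */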
enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

u64 nested_get_evmptr(struct kvm_vcpu *vcpu);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_HYPERV_H */