/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

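/*
 * Returns the MAXPHYADDR value cached in vcpu->arch.maxphyaddr rather than
 * re-walking the guest's CPUID entries; see cpuid_query_maxphyaddr() above
 * for the uncached variant.
 */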
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

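/*
 * A GPA is "legal" if it sets none of the vCPU's reserved physical address
 * bits, which are derived from the guest's effective MAXPHYADDR.
 */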
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

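/*
 * Overwrite the output register tracked for @leaf in @entry with KVM's
 * computed capability mask.  "leaf * 32" converts the capability word index
 * into an X86_FEATURE-style bit number for cpuid_entry_get_reg(); @leaf must
 * be a compile-time constant for the BUILD_BUG_ON() bounds check to work.
 */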
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

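/*
 * Translate @x86_feature through the reverse-CPUID table into its CPUID
 * function/index/register, then return a pointer to that register in the
 * vCPU's CPUID entries, or NULL if the guest lacks the relevant leaf.
 */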
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

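/*
 * Query/clear a feature bit in the guest's CPUID entries, e.g.
 * guest_cpuid_has(vcpu, X86_FEATURE_XSAVE).  A missing leaf simply reads
 * as "feature not present".
 */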
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

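/*
 * Vendor checks based on the vendor string reported in leaf 0x0 of the
 * guest's CPUID entries (EBX/ECX/EDX).
 */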
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

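/*
 * Decode the family/model fields of guest CPUID leaf 0x1 EAX via the
 * kernel's x86_family()/x86_model() helpers, returning -1 if the guest has
 * no leaf 0x1; guest_cpuid_stepping() below follows the same pattern.
 */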
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

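/*
 * True if the guest's CPUID model matches the host's, for callers that only
 * expose model-specific features on matching hardware.  Note that only the
 * model is compared, not family or stepping.
 */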
static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

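/*
 * MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD exist for the guest if any CPUID
 * bit that architecturally implies them is set: Intel's SPEC_CTRL bit
 * enumerates both MSRs, while AMD enumerates them piecemeal.
 */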
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

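/*
 * CPUID faulting: supports_cpuid_fault() checks whether the vCPU's synthetic
 * platform-info MSR advertises the feature, cpuid_fault_enabled() whether
 * the guest has turned it on via MSR_MISC_FEATURES_ENABLES.
 */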
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

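/*
 * Accessors for kvm_cpu_caps, KVM's own view of which CPUID features can be
 * exposed to guests.  @x86_feature must be a compile-time constant so that
 * reverse_cpuid_check() can reject unsupported leaves at build time;
 * kvm_cpu_cap_check_and_set() additionally requires host support.
 */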
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

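/*
 * Paravirt features are reported as available unless userspace has opted in
 * to enforcement (KVM_CAP_ENFORCE_PV_FEATURE_CPUID), in which case only the
 * KVM_FEATURE_* bits present in the guest's CPUID are honored.
 */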
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

#endif