/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

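/*
 * Read the current counter value, folding in any count accrued by a live
 * (i.e. not paused) perf event backing the PMC.
 */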
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

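/*
 * Emulate a guest write to the counter: adjust the stored value by the delta
 * so that a subsequent pmc_read_counter() returns @val (modulo the counter's
 * width), without disturbing the count accrued by the backing perf event.
 */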
static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{
	pmc->counter += val - pmc_read_counter(pmc);
	pmc->counter &= pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

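/* Snapshot the counter value before releasing the backing perf event. */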
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

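/*
 * The sample period is the number of events remaining until the counter
 * overflows, i.e. the two's complement of the current value within the
 * counter's width (a full period if the counter is currently zero).
 */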
static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}

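/* Resync the perf event's sample period with the PMC's current counter value. */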
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused ||
	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

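/*
 * Check if the guest has enabled the PMC via its local control bits, i.e. the
 * OS/USR enable bits in IA32_FIXED_CTR_CTRL for fixed counters or EVENTSEL.EN
 * for GP counters, without consulting PERF_GLOBAL_CTRL.
 */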
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

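/*
 * Initialize kvm_pmu_cap from the host PMU capabilities reported by perf,
 * clamping the version and counter counts to what KVM supports, and zeroing
 * the capabilities entirely if the vPMU is disabled or unusable.
 */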
static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * WARN if perf did NOT disable the hardware PMU even though
		 * the architecturally required GP counters aren't present,
		 * i.e. if there is a non-zero number of counters, but fewer
		 * than what is architecturally required.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}

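/* Mark the PMC for reprogramming and raise KVM_REQ_PMU on its owning vCPU. */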
static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

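/*
 * Queue reprogramming for every counter whose corresponding bit in @diff is
 * set, e.g. the set of global_ctrl bits that changed, and raise KVM_REQ_PMU.
 */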
static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */