// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

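/*
 * Walk the fixed counters and reprogram any counter whose 4-bit control
 * field in @data differs from the current value in fixed_ctr_ctrl, then
 * latch the new control value.
 */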
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/*
 * Called when the global control register has been updated: reprogram
 * every counter whose enable bit changed between the old and new value.
 */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

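/*
 * Map a guest eventsel/unit_mask pair to a generic perf event type;
 * returns PERF_COUNT_HW_MAX if there is no match or the architectural
 * event is marked unavailable in the guest's CPUID 0x0A.EBX.
 */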
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

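/* Map a fixed counter index to its generic perf event type. */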
static unsigned intel_find_fixed_event(int idx)
{
	if (idx >= ARRAY_SIZE(fixed_pmc_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

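/*
 * Translate a global counter index to a kvm_pmc: indices below
 * INTEL_PMC_IDX_FIXED are general-purpose counters, the rest are
 * fixed counters.
 */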
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

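/*
 * Decode an RDPMC index: bit 30 selects the fixed-counter range and the
 * low bits select the counter.  Also narrows *mask to the width of the
 * chosen counter.
 */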
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return NULL;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

	return &counters[idx];
}

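/*
 * The global control/status MSRs exist only for PMU version 2 and up;
 * any other MSR must match a general-purpose or fixed counter.
 */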
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

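/*
 * Read a PMU MSR.  Counter reads are masked to the counter's bit width.
 * Returns 0 on success, 1 if the MSR does not exist.
 */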
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

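/*
 * Write a PMU MSR, reprogramming the affected counters as needed.
 * Returns 0 on success, 1 if the MSR does not exist or the value sets
 * reserved bits.
 */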
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
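		/*
		 * Bits 2, 6 and 10 are the per-counter AnyThread bits and
		 * bits 12 and up are beyond the three supported fixed
		 * counters, so any write that sets them is rejected.
		 */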
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
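			/*
			 * As on real hardware, a guest write through a
			 * legacy PERFCTR MSR is sign-extended from bit 31;
			 * host-initiated writes store the raw 64-bit value.
			 */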
			if (msr_info->host_initiated)
				pmc->counter = data;
			else
				pmc->counter = (s32)data;
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter = data;
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

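/*
 * Re-derive the vCPU's PMU configuration from guest CPUID leaf 0xA,
 * clamping counter counts and widths to what the host PMU supports.
 */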
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (kvm_x86_ops->pt_supported())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

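	/*
	 * If both host and guest support TSX (HLE/RTM), the Haswell
	 * IN_TX and IN_TX_CHECKPOINTED eventsel bits are no longer
	 * treated as reserved.
	 */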
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

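/* One-time setup of each counter's type, owning vCPU and global index. */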
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
}

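/* Stop all counters and clear the counter values and control state. */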
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

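/* Intel-specific PMU callbacks, dispatched through the common PMU code. */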
struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr_idx = intel_is_valid_msr_idx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};