Lines matching full:pmu (arch/x86/kvm/vmx/pmu_intel.c)

3  * KVM PMU support for Intel CPUs
19 #include "pmu.h"
38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) in reprogram_fixed_counters() argument
42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i); in reprogram_fixed_counters()
47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
52 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); in reprogram_fixed_counters()
56 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
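
The matches above are the body of reprogram_fixed_counters(), which runs when the guest writes MSR_CORE_PERF_FIXED_CTR_CTRL. Each fixed counter owns a 4-bit field in that MSR (ring-level enable bits plus a PMI-enable bit), and only counters whose field actually changed get reprogrammed. A minimal user-space sketch of the decode loop, using a toy struct in place of the real struct kvm_pmu (later sketches reuse these includes and this modeling style):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for the few struct kvm_pmu fields the loop touches. */
    struct toy_pmu {
        int      nr_arch_fixed_counters;
        uint64_t fixed_ctr_ctrl;
    };

    /* Each fixed counter owns a 4-bit field in MSR_CORE_PERF_FIXED_CTR_CTRL:
     * bits 0-1 select the ring levels to count, bit 3 enables the PMI. */
    static uint8_t fixed_ctrl_field(uint64_t ctrl, int idx)
    {
        return (ctrl >> (idx * 4)) & 0xf;
    }

    /* Mirrors the loop in reprogram_fixed_counters(): skip counters whose
     * control field is unchanged, "reprogram" the rest, then latch the MSR. */
    static void toy_reprogram_fixed_counters(struct toy_pmu *pmu, uint64_t data)
    {
        for (int i = 0; i < pmu->nr_arch_fixed_counters; i++) {
            uint8_t old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
            uint8_t new_ctrl = fixed_ctrl_field(data, i);

            if (old_ctrl == new_ctrl)
                continue;
            printf("fixed counter %d: ctrl 0x%x -> 0x%x\n", i, old_ctrl, new_ctrl);
        }
        pmu->fixed_ctr_ctrl = data;
    }

    int main(void)
    {
        struct toy_pmu pmu = { .nr_arch_fixed_counters = 3, .fixed_ctr_ctrl = 0 };

        toy_reprogram_fixed_counters(&pmu, 0x0b0); /* counter 1: OS+USR+PMI */
        return 0;
    }
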
60 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) in global_ctrl_changed() argument
63 u64 diff = pmu->global_ctrl ^ data; in global_ctrl_changed()
65 pmu->global_ctrl = data; in global_ctrl_changed()
68 reprogram_counter(pmu, bit); in global_ctrl_changed()
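
global_ctrl_changed() applies the classic XOR-diff trick: a write to MSR_CORE_PERF_GLOBAL_CTRL only reprograms counters whose enable bit actually flipped, not all of them. The same pattern in miniature (the printf stands in for reprogram_counter(); the kernel iterates with for_each_set_bit() over the diff rather than a plain loop):

    /* Sketch of the diff pattern in global_ctrl_changed(): XOR old and new
     * values of MSR_CORE_PERF_GLOBAL_CTRL, then visit only the flipped bits. */
    static void toy_global_ctrl_changed(uint64_t *global_ctrl, uint64_t data)
    {
        uint64_t diff = *global_ctrl ^ data;

        *global_ctrl = data;
        for (int bit = 0; bit < 64; bit++)
            if (diff & (1ull << bit))
                printf("reprogram counter index %d\n", bit);
    }
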
71 static unsigned intel_find_arch_event(struct kvm_pmu *pmu, in intel_find_arch_event() argument
80 && (pmu->available_event_types & (1 << i))) in intel_find_arch_event()
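
intel_find_arch_event() maps a guest eventsel/unit-mask pair onto the architectural event table, accepting a match only if its availability bit is set. available_event_types is built from ~CPUID.0AH:EBX in intel_pmu_refresh() further down, because a set EBX bit means the event is not available. A toy lookup under the same scheme; the table rows follow the SDM's architectural events in EBX bit order, but treat the exact contents as illustrative:

    /* Architectural-event lookup modeled on intel_find_arch_event(). */
    struct arch_event { uint8_t eventsel, unit_mask; const char *name; };

    static const struct arch_event arch_events[] = {
        { 0x3c, 0x00, "core cycles" },
        { 0xc0, 0x00, "instructions retired" },
        { 0x3c, 0x01, "reference cycles" },
        { 0x2e, 0x4f, "LLC references" },
        { 0x2e, 0x41, "LLC misses" },
        { 0xc4, 0x00, "branches retired" },
        { 0xc5, 0x00, "branch mispredicts" },
    };

    static int toy_find_arch_event(uint32_t available_event_types,
                                   uint8_t eventsel, uint8_t unit_mask)
    {
        for (unsigned i = 0; i < sizeof(arch_events) / sizeof(arch_events[0]); i++)
            if (arch_events[i].eventsel == eventsel
                && arch_events[i].unit_mask == unit_mask
                && (available_event_types & (1u << i)))
                return (int)i;
        return -1; /* not architectural: the kernel falls back to a raw event */
    }
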
104 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_pmc_is_enabled() local
106 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); in intel_pmc_is_enabled()
109 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) in intel_pmc_idx_to_pmc() argument
112 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx, in intel_pmc_idx_to_pmc()
117 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0); in intel_pmc_idx_to_pmc()
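
intel_pmc_is_enabled() is a single bit test against the shadowed global_ctrl, and intel_pmc_idx_to_pmc() splits the flat index space that test operates on: GP counters occupy indices 0..N-1, fixed counters start at INTEL_PMC_IDX_FIXED (bit 32 of MSR_CORE_PERF_GLOBAL_CTRL). A compact model of both, with the decode standing in for the get_gp_pmc()/get_fixed_pmc() lookups:

    #define TOY_PMC_IDX_FIXED 32 /* matches INTEL_PMC_IDX_FIXED */

    /* intel_pmc_is_enabled() boils down to this bit test. */
    static int toy_pmc_is_enabled(uint64_t global_ctrl, int pmc_idx)
    {
        return (global_ctrl >> pmc_idx) & 1;
    }

    /* intel_pmc_idx_to_pmc() in miniature: decide which bank a flat index
     * refers to and compute the slot within that bank. */
    static void toy_pmc_idx_decode(int pmc_idx, int *is_fixed, int *slot)
    {
        *is_fixed = pmc_idx >= TOY_PMC_IDX_FIXED;
        *slot     = *is_fixed ? pmc_idx - TOY_PMC_IDX_FIXED : pmc_idx;
    }
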
124 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_rdpmc_ecx() local
129 return (!fixed && idx >= pmu->nr_arch_gp_counters) || in intel_is_valid_rdpmc_ecx()
130 (fixed && idx >= pmu->nr_arch_fixed_counters); in intel_is_valid_rdpmc_ecx()
136 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_rdpmc_ecx_to_pmc() local
143 counters = pmu->fixed_counters; in intel_rdpmc_ecx_to_pmc()
144 num_counters = pmu->nr_arch_fixed_counters; in intel_rdpmc_ecx_to_pmc()
146 counters = pmu->gp_counters; in intel_rdpmc_ecx_to_pmc()
147 num_counters = pmu->nr_arch_gp_counters; in intel_rdpmc_ecx_to_pmc()
151 *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP]; in intel_rdpmc_ecx_to_pmc()
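
The two RDPMC helpers share one encoding: ECX bit 30 selects the fixed-counter bank, the low bits index into it, and the ~(3u << 30) mask strips the type bits before the range check. Note that despite its name, intel_is_valid_rdpmc_ecx() as matched above computes the out-of-range condition, so its callers treat a nonzero result as invalid. A sketch folding that validation together with the bank selection of intel_rdpmc_ecx_to_pmc() (returning true for a usable index); the *mask &= counter_bitmask[...] line above additionally guarantees the value read back never exceeds the advertised counter width:

    #include <stdbool.h>

    struct toy_rdpmc {
        bool     fixed; /* which bank ECX selects */
        unsigned slot;  /* index within that bank */
    };

    static bool toy_rdpmc_decode(unsigned idx, unsigned nr_gp, unsigned nr_fixed,
                                 struct toy_rdpmc *out)
    {
        out->fixed = idx & (1u << 30);
        out->slot  = idx & ~(3u << 30);

        /* Inverse of the range check matched above: true means usable. */
        return out->fixed ? out->slot < nr_fixed : out->slot < nr_gp;
    }
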
168 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) in get_fw_gp_pmc() argument
170 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu))) in get_fw_gp_pmc()
173 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0); in get_fw_gp_pmc()
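
get_fw_gp_pmc() gates the full-width counter aliases: lookups against the MSR_IA32_PMC0 window succeed only when the guest's IA32_PERF_CAPABILITIES advertises full-width writes (which is what fw_writes_is_enabled() checks); otherwise callers fall back to the legacy MSR_IA32_PERFCTR0 window. A sketch of the gate, assuming FW_WRITE is bit 13 as architecturally defined:

    #define TOY_PMU_CAP_FW_WRITES (1ull << 13) /* IA32_PERF_CAPABILITIES.FW_WRITE */

    static bool toy_fw_gp_pmc_exists(uint64_t guest_perf_capabilities,
                                     unsigned slot, unsigned nr_gp)
    {
        if (!(guest_perf_capabilities & TOY_PMU_CAP_FW_WRITES))
            return false; /* alias window hidden: fall back to IA32_PERFCTRx */
        return slot < nr_gp;
    }
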
213 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr() local
221 ret = pmu->version > 1; in intel_is_valid_msr()
224 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || in intel_is_valid_msr()
225 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || in intel_is_valid_msr()
226 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) || in intel_is_valid_msr()
236 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_msr_idx_to_pmc() local
239 pmc = get_fixed_pmc(pmu, msr); in intel_msr_idx_to_pmc()
240 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0); in intel_msr_idx_to_pmc()
241 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0); in intel_msr_idx_to_pmc()
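
intel_is_valid_msr() and intel_msr_idx_to_pmc() funnel an MSR index through the same windows: fixed counters, GP event selectors (MSR_P6_EVNTSEL0 base), legacy GP counters (MSR_IA32_PERFCTR0 base), plus, where enabled, the full-width aliases (MSR_IA32_PMC0 base). The pmc = pmc ? pmc : ... chain is simply first-match-wins. A dispatch sketch with the architectural base addresses spelled out:

    /* First-match-wins MSR dispatch modeled on intel_msr_idx_to_pmc().
     * Base addresses are architectural: 0xc1 = IA32_PERFCTR0,
     * 0x186 = IA32_PERFEVTSEL0 (MSR_P6_EVNTSEL0), 0x309 = IA32_FIXED_CTR0,
     * 0x4c1 = IA32_A_PMC0 (the full-width alias window). */
    enum toy_msr_kind { TOY_MSR_NONE, TOY_MSR_GP_CTR, TOY_MSR_EVTSEL, TOY_MSR_FIXED_CTR };

    static enum toy_msr_kind toy_msr_classify(uint32_t msr, uint32_t nr_gp,
                                              uint32_t nr_fixed, uint32_t *slot)
    {
        if (msr >= 0x309 && msr < 0x309 + nr_fixed) {
            *slot = msr - 0x309;
            return TOY_MSR_FIXED_CTR;
        }
        if (msr >= 0x186 && msr < 0x186 + nr_gp) {
            *slot = msr - 0x186;
            return TOY_MSR_EVTSEL;
        }
        if (msr >= 0xc1 && msr < 0xc1 + nr_gp) {
            *slot = msr - 0xc1;
            return TOY_MSR_GP_CTR;
        }
        if (msr >= 0x4c1 && msr < 0x4c1 + nr_gp) {
            *slot = msr - 0x4c1; /* full-width alias of the same counter */
            return TOY_MSR_GP_CTR;
        }
        return TOY_MSR_NONE;
    }
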
260 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_create_guest_lbr_event() local
292 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
304 pmu->event_count++; in intel_pmu_create_guest_lbr_event()
305 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
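
intel_pmu_create_guest_lbr_event() asks the host perf core for an event whose only purpose is to own the LBR facility on behalf of the guest; on success the virtual LBR slot (INTEL_PMC_IDX_FIXED_VLBR) is marked in pmc_in_use and event_count is bumped so the lazy-release logic accounts for it. As a hedged user-space analogue: requesting PERF_SAMPLE_BRANCH_STACK through perf_event_open(2) is what makes perf reserve the LBRs. The attribute values below are illustrative, not the kernel-internal ones KVM uses:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    /* Open a branch-stack sampling event; sampling branch records is what
     * causes the perf core to claim the LBR hardware on this CPU. */
    static int open_branch_stack_event(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
                                  PERF_SAMPLE_BRANCH_USER;

        return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
    }

Closing the returned fd releases the LBRs again, which is the moral equivalent of the release path matched at the end of this listing.
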
353 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_get_msr() local
359 msr_info->data = pmu->fixed_ctr_ctrl; in intel_pmu_get_msr()
362 msr_info->data = pmu->global_status; in intel_pmu_get_msr()
365 msr_info->data = pmu->global_ctrl; in intel_pmu_get_msr()
368 msr_info->data = pmu->global_ovf_ctrl; in intel_pmu_get_msr()
371 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_get_msr()
372 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_get_msr()
375 val & pmu->counter_bitmask[KVM_PMC_GP]; in intel_pmu_get_msr()
377 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
380 val & pmu->counter_bitmask[KVM_PMC_FIXED]; in intel_pmu_get_msr()
382 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
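
intel_pmu_get_msr() is a switch over the same windows: the four global MSRs are answered straight from their shadowed pmu fields, while per-counter reads go through pmc_read_counter() and are truncated by counter_bitmask[] so the guest never observes more bits than the advertised width. The global arm, modeled with the architectural MSR numbers:

    struct toy_pmu_regs {
        uint64_t fixed_ctr_ctrl, global_status, global_ctrl, global_ovf_ctrl;
    };

    /* The global-MSR arm of intel_pmu_get_msr(): each MSR is served from
     * its shadowed field (0x38d..0x390 are the architectural numbers). */
    static bool toy_get_global_msr(const struct toy_pmu_regs *r, uint32_t msr,
                                   uint64_t *data)
    {
        switch (msr) {
        case 0x38d: *data = r->fixed_ctr_ctrl;  return true; /* FIXED_CTR_CTRL  */
        case 0x38e: *data = r->global_status;   return true; /* GLOBAL_STATUS   */
        case 0x38f: *data = r->global_ctrl;     return true; /* GLOBAL_CTRL     */
        case 0x390: *data = r->global_ovf_ctrl; return true; /* GLOBAL_OVF_CTRL */
        }
        return false;
    }
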
394 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_set_msr() local
401 if (pmu->fixed_ctr_ctrl == data) in intel_pmu_set_msr()
404 reprogram_fixed_counters(pmu, data); in intel_pmu_set_msr()
410 pmu->global_status = data; in intel_pmu_set_msr()
415 if (pmu->global_ctrl == data) in intel_pmu_set_msr()
417 if (kvm_valid_perf_global_ctrl(pmu, data)) { in intel_pmu_set_msr()
418 global_ctrl_changed(pmu, data); in intel_pmu_set_msr()
423 if (!(data & pmu->global_ovf_ctrl_mask)) { in intel_pmu_set_msr()
425 pmu->global_status &= ~data; in intel_pmu_set_msr()
426 pmu->global_ovf_ctrl = data; in intel_pmu_set_msr()
431 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_set_msr()
432 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_set_msr()
434 (data & ~pmu->counter_bitmask[KVM_PMC_GP])) in intel_pmu_set_msr()
444 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_set_msr()
450 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_set_msr()
453 if (!(data & pmu->reserved_bits)) { in intel_pmu_set_msr()
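
The write path does the real policing: unchanged values short-circuit (the == data checks), and everything else is validated against a shadowed reserved-bit mask before being accepted: global_ovf_ctrl_mask for GLOBAL_OVF_CTRL, kvm_valid_perf_global_ctrl() for GLOBAL_CTRL, counter_bitmask and reserved_bits for counters and event selectors. A condensed model of the GLOBAL_OVF_CTRL arm, reusing toy_pmu_regs from the previous sketch:

    #include <errno.h>

    /* Reject writes that touch reserved bits; otherwise writing 1 to a
     * status bit acknowledges (clears) it, and the value is latched. */
    static int toy_set_global_ovf_ctrl(struct toy_pmu_regs *r, uint64_t data,
                                       uint64_t global_ovf_ctrl_mask)
    {
        if (data & global_ovf_ctrl_mask)
            return -EINVAL;            /* reserved bit set: guest gets #GP */
        r->global_status &= ~data;     /* ack the named overflow bits */
        r->global_ovf_ctrl = data;
        return 0;
    }
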
466 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_refresh() local
474 pmu->nr_arch_gp_counters = 0; in intel_pmu_refresh()
475 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
476 pmu->counter_bitmask[KVM_PMC_GP] = 0; in intel_pmu_refresh()
477 pmu->counter_bitmask[KVM_PMC_FIXED] = 0; in intel_pmu_refresh()
478 pmu->version = 0; in intel_pmu_refresh()
479 pmu->reserved_bits = 0xffffffff00200000ull; in intel_pmu_refresh()
487 pmu->version = eax.split.version_id; in intel_pmu_refresh()
488 if (!pmu->version) in intel_pmu_refresh()
493 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, in intel_pmu_refresh()
496 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; in intel_pmu_refresh()
498 pmu->available_event_types = ~entry->ebx & in intel_pmu_refresh()
501 if (pmu->version == 1) { in intel_pmu_refresh()
502 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
504 pmu->nr_arch_fixed_counters = in intel_pmu_refresh()
509 pmu->counter_bitmask[KVM_PMC_FIXED] = in intel_pmu_refresh()
513 pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | in intel_pmu_refresh()
514 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); in intel_pmu_refresh()
515 pmu->global_ctrl_mask = ~pmu->global_ctrl; in intel_pmu_refresh()
516 pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask in intel_pmu_refresh()
520 pmu->global_ovf_ctrl_mask &= in intel_pmu_refresh()
527 pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; in intel_pmu_refresh()
529 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
530 0, pmu->nr_arch_gp_counters); in intel_pmu_refresh()
531 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
532 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); in intel_pmu_refresh()
542 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1); in intel_pmu_refresh()
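
intel_pmu_refresh() rebuilds the entire model from guest CPUID leaf 0xA: EAX carries the version, GP counter count and GP width; EDX carries the fixed counter count and width; EBX flags unavailable architectural events. Everything else (global_ctrl, the reserved-bit masks, the all_valid_pmc_idx bitmap) is derived from those counts. A sketch of the EAX/EDX unpacking; the field layout is architectural:

    /* CPUID.0AH decode as done in intel_pmu_refresh(): EAX[7:0] version,
     * EAX[15:8] #GP counters, EAX[23:16] GP width; EDX[4:0] #fixed
     * counters, EDX[12:5] fixed width. */
    struct toy_pmu_caps {
        int version, nr_gp, nr_fixed;
        uint64_t gp_bitmask, fixed_bitmask, global_ctrl;
    };

    static void toy_pmu_refresh(uint32_t eax, uint32_t edx, struct toy_pmu_caps *c)
    {
        c->version = eax & 0xff;
        if (!c->version)
            return;                   /* no PMU exposed to the guest */

        c->nr_gp      = (eax >> 8) & 0xff;
        c->gp_bitmask = (1ull << ((eax >> 16) & 0xff)) - 1;

        /* Fixed counters only exist from version 2 onward. */
        c->nr_fixed      = c->version == 1 ? 0 : (int)(edx & 0x1f);
        c->fixed_bitmask = (1ull << ((edx >> 5) & 0xff)) - 1;

        /* All-counters-enabled value; its complement becomes the reserved
         * mask used to validate guest writes to GLOBAL_CTRL. */
        c->global_ctrl = ((1ull << c->nr_gp) - 1) |
                         (((1ull << c->nr_fixed) - 1) << 32);
    }
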
548 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_init() local
552 pmu->gp_counters[i].type = KVM_PMC_GP; in intel_pmu_init()
553 pmu->gp_counters[i].vcpu = vcpu; in intel_pmu_init()
554 pmu->gp_counters[i].idx = i; in intel_pmu_init()
555 pmu->gp_counters[i].current_config = 0; in intel_pmu_init()
559 pmu->fixed_counters[i].type = KVM_PMC_FIXED; in intel_pmu_init()
560 pmu->fixed_counters[i].vcpu = vcpu; in intel_pmu_init()
561 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; in intel_pmu_init()
562 pmu->fixed_counters[i].current_config = 0; in intel_pmu_init()
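
intel_pmu_init() stamps identity into every counter slot up front: type, owning vcpu, and the flat idx the rest of the code keys on, with fixed counters offset by INTEL_PMC_IDX_FIXED. refresh() later decides how many of these slots the guest can actually see. A toy version, reusing TOY_PMC_IDX_FIXED from the earlier sketch (array sizes here are illustrative):

    #define TOY_MAX_GP    8
    #define TOY_MAX_FIXED 3

    struct toy_pmc { int type, idx; }; /* type: 0 = GP, 1 = fixed */

    struct toy_pmu_state {
        struct toy_pmc gp[TOY_MAX_GP];
        struct toy_pmc fixed[TOY_MAX_FIXED];
    };

    /* Same shape as the two loops in intel_pmu_init(): every slot learns
     * its kind and flat index before refresh() sizes the visible PMU. */
    static void toy_pmu_init(struct toy_pmu_state *s)
    {
        for (int i = 0; i < TOY_MAX_GP; i++)
            s->gp[i] = (struct toy_pmc){ .type = 0, .idx = i };
        for (int i = 0; i < TOY_MAX_FIXED; i++)
            s->fixed[i] = (struct toy_pmc){ .type = 1,
                                            .idx = i + TOY_PMC_IDX_FIXED };
    }
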
573 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_reset() local
578 pmc = &pmu->gp_counters[i]; in intel_pmu_reset()
585 pmc = &pmu->fixed_counters[i]; in intel_pmu_reset()
591 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = in intel_pmu_reset()
592 pmu->global_ovf_ctrl = 0; in intel_pmu_reset()
598 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
666 * pmu resources (e.g. LBR) that were assigned to the guest. This is
670 * confirm that the pmu features exposed to the guest are not reclaimed
676 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in vmx_passthrough_lbr_msrs() local
683 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) in vmx_passthrough_lbr_msrs()
690 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in vmx_passthrough_lbr_msrs()
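
vmx_passthrough_lbr_msrs() toggles the LBR MSR intercepts off the same pmc_in_use bit that the creation path set: while the VLBR bit is set the guest owns the LBR MSRs and they are passed through; when the backing event goes away the bit is cleared and interception resumes, letting the host reclaim the LBRs. A model of the toggle (the numeric index is illustrative; the kernel reserves a dedicated flat index, INTEL_PMC_IDX_FIXED_VLBR, above the fixed counters):

    #define TOY_PMC_IDX_VLBR 47 /* illustrative stand-in for INTEL_PMC_IDX_FIXED_VLBR */

    /* Guest keeps direct access to the LBR MSRs only while the VLBR slot
     * is marked in use. */
    static bool toy_lbr_msrs_passed_through(uint64_t pmc_in_use)
    {
        return pmc_in_use & (1ull << TOY_PMC_IDX_VLBR);
    }

    /* Release path: clearing the bit re-enables MSR interception. */
    static void toy_release_guest_lbr(uint64_t *pmc_in_use)
    {
        *pmc_in_use &= ~(1ull << TOY_PMC_IDX_VLBR);
    }
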