Lines matching "pmu" (full identifier search) in arch/x86/kvm/vmx/pmu_intel.c

3  * KVM PMU support for Intel CPUs
19 #include "pmu.h"
38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) in reprogram_fixed_counters() argument
41 u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl; in reprogram_fixed_counters()
44 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
45 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
52 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
54 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); in reprogram_fixed_counters()
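The reprogram_fixed_counters() lines above compare the old and new fixed-counter control values and only reprogram counters whose control field actually changed. Below is a minimal, self-contained sketch of that per-counter layout: each fixed counter i owns a 4-bit field at bits 4*i..4*i+3 of IA32_FIXED_CTR_CTRL. The toy_pmu type and helper names are illustrative stand-ins, not the KVM code.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the few kvm_pmu fields used here. */
struct toy_pmu {
	uint64_t fixed_ctr_ctrl;	/* guest view of IA32_FIXED_CTR_CTRL */
	int nr_fixed;			/* nr_arch_fixed_counters */
};

/* Each fixed counter i is controlled by a 4-bit field at bits [4i+3:4i]. */
static uint8_t fixed_ctrl_field(uint64_t ctrl, int i)
{
	return (ctrl >> (i * 4)) & 0xf;
}

/* Reprogram only the counters whose control field changed, mirroring the
 * old/new comparison done per counter in reprogram_fixed_counters(). */
static void toy_reprogram_fixed(struct toy_pmu *pmu, uint64_t data)
{
	uint64_t old = pmu->fixed_ctr_ctrl;
	int i;

	pmu->fixed_ctr_ctrl = data;
	for (i = 0; i < pmu->nr_fixed; i++) {
		if (fixed_ctrl_field(data, i) == fixed_ctrl_field(old, i))
			continue;
		printf("fixed counter %d: ctrl %#x -> %#x, reprogram\n",
		       i, fixed_ctrl_field(old, i), fixed_ctrl_field(data, i));
	}
}

int main(void)
{
	struct toy_pmu pmu = { .fixed_ctr_ctrl = 0x0b0, .nr_fixed = 3 };

	/* Enable fixed counter 0 for OS and USR (field 0x3); counter 1's field
	 * is unchanged, so only counter 0 gets reprogrammed. */
	toy_reprogram_fixed(&pmu, 0x0b3);
	return 0;
}
```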
59 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) in intel_pmc_idx_to_pmc() argument
62 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx, in intel_pmc_idx_to_pmc()
67 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0); in intel_pmc_idx_to_pmc()
71 static void reprogram_counters(struct kvm_pmu *pmu, u64 diff) in reprogram_counters() argument
77 pmc = intel_pmc_idx_to_pmc(pmu, bit); in reprogram_counters()
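reprogram_counters() walks a bitmask of counters whose enable bit flipped (intel_pmu_set_msr() further down passes it the XOR of the old and new GLOBAL_CTRL value) and resolves each set bit through intel_pmc_idx_to_pmc(): bits 0..N-1 name general-purpose counters, bits starting at INTEL_PMC_IDX_FIXED (32) name fixed counters. A rough standalone illustration of that walk, with hypothetical names:

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_IDX_FIXED 32	/* mirrors INTEL_PMC_IDX_FIXED */

static void toy_reprogram_counters(uint64_t diff, int nr_gp, int nr_fixed)
{
	int bit;

	/* Visit every set bit in the change mask, like for_each_set_bit(). */
	for (bit = 0; bit < 64; bit++) {
		if (!(diff & (1ull << bit)))
			continue;
		if (bit < nr_gp)
			printf("reprogram GP counter %d\n", bit);
		else if (bit >= TOY_IDX_FIXED && bit - TOY_IDX_FIXED < nr_fixed)
			printf("reprogram fixed counter %d\n", bit - TOY_IDX_FIXED);
	}
}

int main(void)
{
	/* old GLOBAL_CTRL enabled GP0+GP1, the new value enables GP0+FIXED0,
	 * so the XOR flags GP1 and fixed counter 0 for reprogramming. */
	uint64_t old_ctrl = 0x3, new_ctrl = 0x100000001ull;

	toy_reprogram_counters(old_ctrl ^ new_ctrl, 4, 3);
	return 0;
}
```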
85 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_hw_event_available() local
96 if ((i < 7) && !(pmu->available_event_types & (1 << i))) in intel_hw_event_available()
108 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_pmc_is_enabled() local
110 if (!intel_pmu_has_perf_global_ctrl(pmu)) in intel_pmc_is_enabled()
113 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); in intel_pmc_is_enabled()
118 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_rdpmc_ecx() local
123 return fixed ? idx < pmu->nr_arch_fixed_counters in intel_is_valid_rdpmc_ecx()
124 : idx < pmu->nr_arch_gp_counters; in intel_is_valid_rdpmc_ecx()
130 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_rdpmc_ecx_to_pmc() local
137 counters = pmu->fixed_counters; in intel_rdpmc_ecx_to_pmc()
138 num_counters = pmu->nr_arch_fixed_counters; in intel_rdpmc_ecx_to_pmc()
140 counters = pmu->gp_counters; in intel_rdpmc_ecx_to_pmc()
141 num_counters = pmu->nr_arch_gp_counters; in intel_rdpmc_ecx_to_pmc()
145 *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP]; in intel_rdpmc_ecx_to_pmc()
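The intel_is_valid_rdpmc_ecx()/intel_rdpmc_ecx_to_pmc() lines implement the architectural RDPMC index encoding: bit 30 of ECX selects the fixed-counter class, the low bits pick a counter within that class, and the result is masked to the counter's advertised bit width. A hedged, standalone decoder sketch; the toy_rdpmc_pmu fields are illustrative names for the pmu fields used above:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_rdpmc_pmu {
	int nr_gp;			/* nr_arch_gp_counters */
	int nr_fixed;			/* nr_arch_fixed_counters */
	uint64_t gp_mask;		/* counter_bitmask[KVM_PMC_GP] */
	uint64_t fixed_mask;		/* counter_bitmask[KVM_PMC_FIXED] */
	uint64_t gp_counters[8];
	uint64_t fixed_counters[4];
};

/* Decode an RDPMC ECX value: bit 30 selects the fixed-counter class. */
static bool toy_rdpmc(const struct toy_rdpmc_pmu *pmu, uint32_t ecx, uint64_t *val)
{
	bool fixed = ecx & (1u << 30);
	uint32_t idx = ecx & ~(3u << 30);	/* strip the type bits */

	if (fixed) {
		if (idx >= (uint32_t)pmu->nr_fixed)
			return false;		/* out of range: guest gets #GP */
		*val = pmu->fixed_counters[idx] & pmu->fixed_mask;
	} else {
		if (idx >= (uint32_t)pmu->nr_gp)
			return false;
		*val = pmu->gp_counters[idx] & pmu->gp_mask;
	}
	return true;
}

int main(void)
{
	struct toy_rdpmc_pmu pmu = {
		.nr_gp = 4, .nr_fixed = 3,
		.gp_mask = (1ull << 48) - 1, .fixed_mask = (1ull << 48) - 1,
		.gp_counters = { 123 }, .fixed_counters = { 0, 456 },
	};
	uint64_t val;

	/* ECX = (1 << 30) | 1 reads fixed counter 1. */
	if (toy_rdpmc(&pmu, (1u << 30) | 1, &val))
		printf("fixed counter 1 = %llu\n", (unsigned long long)val);
	return 0;
}
```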
162 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) in get_fw_gp_pmc() argument
164 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu))) in get_fw_gp_pmc()
167 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0); in get_fw_gp_pmc()
190 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr() local
199 return intel_pmu_has_perf_global_ctrl(pmu); in intel_is_valid_msr()
213 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || in intel_is_valid_msr()
214 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || in intel_is_valid_msr()
215 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) || in intel_is_valid_msr()
225 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_msr_idx_to_pmc() local
228 pmc = get_fixed_pmc(pmu, msr); in intel_msr_idx_to_pmc()
229 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0); in intel_msr_idx_to_pmc()
230 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0); in intel_msr_idx_to_pmc()
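get_fw_gp_pmc() and intel_msr_idx_to_pmc() above resolve a raw MSR number by testing it against several contiguous ranges: the fixed counters at MSR_CORE_PERF_FIXED_CTR0, the event selects at MSR_P6_EVNTSEL0, the legacy counters at MSR_IA32_PERFCTR0, and, only when full-width writes are exposed to the guest, the aliased full-width counters at MSR_IA32_PMC0. A simplified range check under those assumptions, with the architectural MSR base values hard-coded for illustration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural MSR bases (values per the Intel SDM). */
#define MSR_IA32_PERFCTR0		0x0c1
#define MSR_P6_EVNTSEL0			0x186
#define MSR_CORE_PERF_FIXED_CTR0	0x309
#define MSR_IA32_PMC0			0x4c1	/* full-width alias of PERFCTR0 */

enum toy_pmc_type { TOY_PMC_NONE, TOY_PMC_GP, TOY_PMC_FIXED };

struct toy_lookup {
	enum toy_pmc_type type;
	int idx;
};

/* Resolve an MSR to a counter index by testing each base range in turn. */
static struct toy_lookup toy_msr_to_pmc(uint32_t msr, int nr_gp, int nr_fixed,
					bool fw_writes)
{
	struct toy_lookup r = { TOY_PMC_NONE, -1 };

	if (msr >= MSR_CORE_PERF_FIXED_CTR0 &&
	    msr < MSR_CORE_PERF_FIXED_CTR0 + (uint32_t)nr_fixed)
		r = (struct toy_lookup){ TOY_PMC_FIXED, (int)(msr - MSR_CORE_PERF_FIXED_CTR0) };
	else if (msr >= MSR_P6_EVNTSEL0 && msr < MSR_P6_EVNTSEL0 + (uint32_t)nr_gp)
		r = (struct toy_lookup){ TOY_PMC_GP, (int)(msr - MSR_P6_EVNTSEL0) };
	else if (msr >= MSR_IA32_PERFCTR0 && msr < MSR_IA32_PERFCTR0 + (uint32_t)nr_gp)
		r = (struct toy_lookup){ TOY_PMC_GP, (int)(msr - MSR_IA32_PERFCTR0) };
	else if (fw_writes && msr >= MSR_IA32_PMC0 && msr < MSR_IA32_PMC0 + (uint32_t)nr_gp)
		r = (struct toy_lookup){ TOY_PMC_GP, (int)(msr - MSR_IA32_PMC0) };

	return r;
}

int main(void)
{
	/* 0x4c2 falls in the full-width range, so it maps to GP counter 1
	 * only when full-width writes are enabled. */
	struct toy_lookup r = toy_msr_to_pmc(0x4c2, 8, 3, true);

	printf("type=%d idx=%d\n", r.type, r.idx);
	return 0;
}
```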
249 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_create_guest_lbr_event() local
281 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
293 pmu->event_count++; in intel_pmu_create_guest_lbr_event()
294 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
342 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_get_msr() local
348 msr_info->data = pmu->fixed_ctr_ctrl; in intel_pmu_get_msr()
351 msr_info->data = pmu->global_status; in intel_pmu_get_msr()
354 msr_info->data = pmu->global_ctrl; in intel_pmu_get_msr()
360 msr_info->data = pmu->pebs_enable; in intel_pmu_get_msr()
363 msr_info->data = pmu->ds_area; in intel_pmu_get_msr()
366 msr_info->data = pmu->pebs_data_cfg; in intel_pmu_get_msr()
369 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_get_msr()
370 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_get_msr()
373 val & pmu->counter_bitmask[KVM_PMC_GP]; in intel_pmu_get_msr()
375 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
378 val & pmu->counter_bitmask[KVM_PMC_FIXED]; in intel_pmu_get_msr()
380 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
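In the intel_pmu_get_msr() lines, every counter read is masked with pmu->counter_bitmask[...], which intel_pmu_refresh() derives from the CPUID.0AH bit widths, so the guest never observes bits above the advertised counter width. A small illustration of that truncation, with an arbitrary width and raw value:

```c
#include <stdint.h>
#include <stdio.h>

/* counter_bitmask is just ((1 << bit_width) - 1) for the advertised width. */
static uint64_t counter_bitmask(unsigned int bit_width)
{
	return (bit_width >= 64) ? ~0ull : ((1ull << bit_width) - 1);
}

int main(void)
{
	uint64_t raw = 0xffff8000deadbeefull;	/* internal 64-bit running value */

	/* With a 48-bit GP counter the guest-visible value keeps bits 0..47. */
	printf("guest sees %#llx\n",
	       (unsigned long long)(raw & counter_bitmask(48)));
	return 0;
}
```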
392 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_set_msr() local
400 if (pmu->fixed_ctr_ctrl == data) in intel_pmu_set_msr()
402 if (!(data & pmu->fixed_ctr_ctrl_mask)) { in intel_pmu_set_msr()
403 reprogram_fixed_counters(pmu, data); in intel_pmu_set_msr()
409 pmu->global_status = data; in intel_pmu_set_msr()
414 if (pmu->global_ctrl == data) in intel_pmu_set_msr()
416 if (kvm_valid_perf_global_ctrl(pmu, data)) { in intel_pmu_set_msr()
417 diff = pmu->global_ctrl ^ data; in intel_pmu_set_msr()
418 pmu->global_ctrl = data; in intel_pmu_set_msr()
419 reprogram_counters(pmu, diff); in intel_pmu_set_msr()
424 if (!(data & pmu->global_ovf_ctrl_mask)) { in intel_pmu_set_msr()
426 pmu->global_status &= ~data; in intel_pmu_set_msr()
431 if (pmu->pebs_enable == data) in intel_pmu_set_msr()
433 if (!(data & pmu->pebs_enable_mask)) { in intel_pmu_set_msr()
434 diff = pmu->pebs_enable ^ data; in intel_pmu_set_msr()
435 pmu->pebs_enable = data; in intel_pmu_set_msr()
436 reprogram_counters(pmu, diff); in intel_pmu_set_msr()
445 pmu->ds_area = data; in intel_pmu_set_msr()
448 if (pmu->pebs_data_cfg == data) in intel_pmu_set_msr()
450 if (!(data & pmu->pebs_data_cfg_mask)) { in intel_pmu_set_msr()
451 pmu->pebs_data_cfg = data; in intel_pmu_set_msr()
456 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_set_msr()
457 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_set_msr()
459 (data & ~pmu->counter_bitmask[KVM_PMC_GP])) in intel_pmu_set_msr()
467 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_set_msr()
471 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_set_msr()
474 reserved_bits = pmu->reserved_bits; in intel_pmu_set_msr()
476 (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) in intel_pmu_set_msr()
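The intel_pmu_set_msr() lines follow one pattern for the control MSRs: bail out early if the value is unchanged, reject the write (the guest gets #GP) if any bit in the per-MSR reserved mask is set, and otherwise store the value and reprogram whatever the XOR against the old value says changed. A condensed, standalone version of that pattern with hypothetical names and mask values:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ctrl_msr {
	uint64_t value;		/* current guest value */
	uint64_t rsvd_mask;	/* bits that must be zero, e.g. pebs_enable_mask */
};

/* Returns false to signal #GP, true if the write was accepted. */
static bool toy_wrmsr_ctrl(struct toy_ctrl_msr *msr, uint64_t data,
			   uint64_t *changed)
{
	*changed = 0;

	if (msr->value == data)
		return true;			/* nothing to do */
	if (data & msr->rsvd_mask)
		return false;			/* reserved bit set: reject */

	*changed = msr->value ^ data;		/* bits that need reprogramming */
	msr->value = data;
	return true;
}

int main(void)
{
	/* Hypothetical enable MSR with only bits 0-3 writable. */
	struct toy_ctrl_msr ctrl = { .value = 0, .rsvd_mask = ~0xfull };
	uint64_t diff;

	if (!toy_wrmsr_ctrl(&ctrl, 0x3, &diff))
		return 1;
	printf("changed bits: %#llx\n", (unsigned long long)diff);
	return 0;
}
```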
490 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu) in setup_fixed_pmc_eventsel() argument
497 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in setup_fixed_pmc_eventsel()
498 pmc = &pmu->fixed_counters[i]; in setup_fixed_pmc_eventsel()
507 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_refresh() local
516 pmu->nr_arch_gp_counters = 0; in intel_pmu_refresh()
517 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
518 pmu->counter_bitmask[KVM_PMC_GP] = 0; in intel_pmu_refresh()
519 pmu->counter_bitmask[KVM_PMC_FIXED] = 0; in intel_pmu_refresh()
520 pmu->version = 0; in intel_pmu_refresh()
521 pmu->reserved_bits = 0xffffffff00200000ull; in intel_pmu_refresh()
522 pmu->raw_event_mask = X86_RAW_EVENT_MASK; in intel_pmu_refresh()
523 pmu->global_ctrl_mask = ~0ull; in intel_pmu_refresh()
524 pmu->global_ovf_ctrl_mask = ~0ull; in intel_pmu_refresh()
525 pmu->fixed_ctr_ctrl_mask = ~0ull; in intel_pmu_refresh()
526 pmu->pebs_enable_mask = ~0ull; in intel_pmu_refresh()
527 pmu->pebs_data_cfg_mask = ~0ull; in intel_pmu_refresh()
535 pmu->version = eax.split.version_id; in intel_pmu_refresh()
536 if (!pmu->version) in intel_pmu_refresh()
539 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, in intel_pmu_refresh()
543 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; in intel_pmu_refresh()
546 pmu->available_event_types = ~entry->ebx & in intel_pmu_refresh()
549 if (pmu->version == 1) { in intel_pmu_refresh()
550 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
552 pmu->nr_arch_fixed_counters = in intel_pmu_refresh()
558 pmu->counter_bitmask[KVM_PMC_FIXED] = in intel_pmu_refresh()
560 setup_fixed_pmc_eventsel(pmu); in intel_pmu_refresh()
563 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) in intel_pmu_refresh()
564 pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4)); in intel_pmu_refresh()
565 counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) | in intel_pmu_refresh()
566 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED)); in intel_pmu_refresh()
567 pmu->global_ctrl_mask = counter_mask; in intel_pmu_refresh()
568 pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask in intel_pmu_refresh()
572 pmu->global_ovf_ctrl_mask &= in intel_pmu_refresh()
579 pmu->reserved_bits ^= HSW_IN_TX; in intel_pmu_refresh()
580 pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); in intel_pmu_refresh()
583 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
584 0, pmu->nr_arch_gp_counters); in intel_pmu_refresh()
585 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
586 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); in intel_pmu_refresh()
596 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1); in intel_pmu_refresh()
600 pmu->pebs_enable_mask = counter_mask; in intel_pmu_refresh()
601 pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE; in intel_pmu_refresh()
602 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in intel_pmu_refresh()
603 pmu->fixed_ctr_ctrl_mask &= in intel_pmu_refresh()
606 pmu->pebs_data_cfg_mask = ~0xff00000full; in intel_pmu_refresh()
608 pmu->pebs_enable_mask = in intel_pmu_refresh()
609 ~((1ull << pmu->nr_arch_gp_counters) - 1); in intel_pmu_refresh()
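Most of the masks checked by intel_pmu_set_msr() are computed once in intel_pmu_refresh() from the guest CPUID: the counter bitmasks come from the advertised bit widths, the GLOBAL_CTRL reserved mask is the complement of one bit per usable GP counter plus one bit per fixed counter starting at bit 32, and each available fixed counter clears its enable (0x3) and PMI (0x8) bits out of the FIXED_CTR_CTRL reserved mask. A standalone recomputation of those masks under example CPUID values (4 GP counters and 3 fixed counters, all 48 bits wide):

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_IDX_FIXED 32	/* fixed counters start at bit 32 of GLOBAL_CTRL */

int main(void)
{
	unsigned int nr_gp = 4, gp_width = 48;
	unsigned int nr_fixed = 3, fixed_width = 48;
	uint64_t gp_bitmask, fixed_bitmask, counter_mask, fixed_ctrl_mask = ~0ull;
	unsigned int i;

	/* counter_bitmask[...] = ((u64)1 << bit_width) - 1 */
	gp_bitmask = (1ull << gp_width) - 1;
	fixed_bitmask = (1ull << fixed_width) - 1;

	/* One valid enable bit per GP counter, one per fixed counter at
	 * bit 32 and up; every other GLOBAL_CTRL bit stays reserved. */
	counter_mask = ~(((1ull << nr_gp) - 1) |
			 (((1ull << nr_fixed) - 1) << TOY_IDX_FIXED));

	/* Each available fixed counter clears 0xb (EN plus PMI) from the
	 * FIXED_CTR_CTRL reserved mask; the AnyThread bit stays reserved. */
	for (i = 0; i < nr_fixed; i++)
		fixed_ctrl_mask &= ~(0xbull << (i * 4));

	printf("GP counter bitmask     %#llx\n", (unsigned long long)gp_bitmask);
	printf("fixed counter bitmask  %#llx\n", (unsigned long long)fixed_bitmask);
	printf("GLOBAL_CTRL rsvd mask  %#llx\n", (unsigned long long)counter_mask);
	printf("FIXED_CTR_CTRL rsvd    %#llx\n", (unsigned long long)fixed_ctrl_mask);
	return 0;
}
```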
617 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_init() local
621 pmu->gp_counters[i].type = KVM_PMC_GP; in intel_pmu_init()
622 pmu->gp_counters[i].vcpu = vcpu; in intel_pmu_init()
623 pmu->gp_counters[i].idx = i; in intel_pmu_init()
624 pmu->gp_counters[i].current_config = 0; in intel_pmu_init()
628 pmu->fixed_counters[i].type = KVM_PMC_FIXED; in intel_pmu_init()
629 pmu->fixed_counters[i].vcpu = vcpu; in intel_pmu_init()
630 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; in intel_pmu_init()
631 pmu->fixed_counters[i].current_config = 0; in intel_pmu_init()
642 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_reset() local
647 pmc = &pmu->gp_counters[i]; in intel_pmu_reset()
654 pmc = &pmu->fixed_counters[i]; in intel_pmu_reset()
660 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0; in intel_pmu_reset()
666 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
734 * pmu resources (e.g. LBR) that were assigned to the guest. This is
738 * confirm that the pmu features enabled to the guest are not reclaimed
744 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in vmx_passthrough_lbr_msrs() local
751 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) in vmx_passthrough_lbr_msrs()
758 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in vmx_passthrough_lbr_msrs()
776 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu) in intel_pmu_cross_mapped_check() argument
781 for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl, in intel_pmu_cross_mapped_check()
783 pmc = intel_pmc_idx_to_pmc(pmu, bit); in intel_pmu_cross_mapped_check()
795 pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx); in intel_pmu_cross_mapped_check()