Lines Matching full:pmc (identifier search hits for "pmc" in KVM's Intel vPMU code, arch/x86/kvm/vmx/pmu_intel.c)

35 /* mapping between fixed pmc index and intel_arch_events array */
40 struct kvm_pmc *pmc; in reprogram_fixed_counters() local
52 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
55 reprogram_counter(pmc); in reprogram_fixed_counters()
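
The reprogram_fixed_counters() hits above show the per-fixed-counter loop: each fixed counter is looked up by its MSR index (MSR_CORE_PERF_FIXED_CTR0 + i) and pushed through the common reprogram_counter() path. A minimal sketch of that loop follows; the data parameter and the fixed_ctrl_field() "skip if unchanged" comparison are assumptions filled in from context, not shown in the matched lines.

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	struct kvm_pmc *pmc;
	u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
	int i;

	pmu->fixed_ctr_ctrl = data;
	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		/* Only reprogram counters whose control field changed. */
		if (fixed_ctrl_field(data, i) ==
		    fixed_ctrl_field(old_fixed_ctr_ctrl, i))
			continue;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
		reprogram_counter(pmc);
	}
}
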
74 struct kvm_pmc *pmc; in reprogram_counters() local
77 pmc = intel_pmc_idx_to_pmc(pmu, bit); in reprogram_counters()
78 if (pmc) in reprogram_counters()
79 reprogram_counter(pmc); in reprogram_counters()
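
reprogram_counters() applies the same idea across a bitmask of counter indices, e.g. when a write to the guest's global control flips several enable bits at once. A sketch, assuming the changed-bits mask is passed in as diff (the parameter name is an assumption):

static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	struct kvm_pmc *pmc;
	int bit;

	/* Visit every counter index whose bit changed. */
	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
		pmc = intel_pmc_idx_to_pmc(pmu, bit);
		if (pmc)
			reprogram_counter(pmc);
	}
}
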
83 static bool intel_hw_event_available(struct kvm_pmc *pmc) in intel_hw_event_available() argument
85 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_hw_event_available()
86 u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; in intel_hw_event_available()
87 u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; in intel_hw_event_available()
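
intel_hw_event_available() pulls the event select (bits 7:0) and unit mask (bits 15:8) out of the guest's event selector and checks them against the architectural event table. A sketch of the lookup; the availability test against pmu->available_event_types is an assumption filled in from context:

static bool intel_hw_event_available(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
		if (intel_arch_events[i].eventsel != event_select ||
		    intel_arch_events[i].unit_mask != unit_mask)
			continue;

		/* Architectural event: honor CPUID's availability mask. */
		if (!(pmu->available_event_types & BIT(i)))
			return false;
		break;
	}

	/* Non-architectural events are always allowed. */
	return true;
}
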
105 /* check if a PMC is enabled by comparing it with global_ctrl bits. */
106 static bool intel_pmc_is_enabled(struct kvm_pmc *pmc) in intel_pmc_is_enabled() argument
108 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_pmc_is_enabled()
113 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); in intel_pmc_is_enabled()
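
intel_pmc_is_enabled() reduces to a single bit test: the PMC's index doubles as its bit position in the guest's IA32_PERF_GLOBAL_CTRL image. Sketch; any handling for guests that lack PERF_GLOBAL_CTRL is not visible in the matched lines and is omitted here:

static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* pmc->idx is the bit position in the guest's global_ctrl value. */
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
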
226 struct kvm_pmc *pmc; in intel_msr_idx_to_pmc() local
228 pmc = get_fixed_pmc(pmu, msr); in intel_msr_idx_to_pmc()
229 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0); in intel_msr_idx_to_pmc()
230 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0); in intel_msr_idx_to_pmc()
232 return pmc; in intel_msr_idx_to_pmc()
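
intel_msr_idx_to_pmc() is a plain lookup cascade: fixed-counter MSRs first, then the legacy event-select range (MSR_P6_EVNTSEL0), then the GP counter range (MSR_IA32_PERFCTR0). Sketch, assuming the usual (vcpu, msr) signature:

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}
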
343 struct kvm_pmc *pmc; in intel_pmu_get_msr() local
369 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_get_msr()
370 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_get_msr()
371 u64 val = pmc_read_counter(pmc); in intel_pmu_get_msr()
375 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
376 u64 val = pmc_read_counter(pmc); in intel_pmu_get_msr()
380 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
381 msr_info->data = pmc->eventsel; in intel_pmu_get_msr()
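
The intel_pmu_get_msr() hits cover the PMC read paths: GP counters (reachable via either the legacy MSR_IA32_PERFCTR0 alias or the full-width MSR_IA32_PMC0 alias) and fixed counters are read through pmc_read_counter(), while an event-select MSR simply returns the cached eventsel. The helper below is hypothetical (the real code sits inside intel_pmu_get_msr()); the counter_bitmask truncation is assumed from context, since the data-assignment lines are not in the listing.

/* Hypothetical helper illustrating the read paths matched above. */
static int intel_pmu_read_pmc_msr(struct kvm_pmu *pmu, u32 msr, u64 *data)
{
	struct kvm_pmc *pmc;

	if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
	    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
		/* GP counter: truncate to the advertised counter width. */
		*data = pmc_read_counter(pmc) &
			pmu->counter_bitmask[KVM_PMC_GP];
		return 0;
	} else if ((pmc = get_fixed_pmc(pmu, msr))) {
		*data = pmc_read_counter(pmc) &
			pmu->counter_bitmask[KVM_PMC_FIXED];
		return 0;
	} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
		/* Event-select MSR: return the cached selector. */
		*data = pmc->eventsel;
		return 0;
	}

	return 1;	/* not a PMC MSR */
}
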
393 struct kvm_pmc *pmc; in intel_pmu_set_msr() local
456 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_set_msr()
457 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_set_msr()
464 pmc->counter += data - pmc_read_counter(pmc); in intel_pmu_set_msr()
465 pmc_update_sample_period(pmc); in intel_pmu_set_msr()
467 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_set_msr()
468 pmc->counter += data - pmc_read_counter(pmc); in intel_pmu_set_msr()
469 pmc_update_sample_period(pmc); in intel_pmu_set_msr()
471 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_set_msr()
472 if (data == pmc->eventsel) in intel_pmu_set_msr()
475 if ((pmc->idx == 2) && in intel_pmu_set_msr()
479 pmc->eventsel = data; in intel_pmu_set_msr()
480 reprogram_counter(pmc); in intel_pmu_set_msr()
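
On the write side, counter writes are folded in as a delta against the current reading (pmc->counter += data - pmc_read_counter(pmc)), so a running perf event's accumulated count stays consistent, and the sample period is refreshed. Event-select writes are dropped if unchanged, validated against reserved bits, then stored and reprogrammed; the pmc->idx == 2 test at line 475 points at PMC2's special treatment of HSW_IN_TX_CHECKPOINTED, and that adjustment below is an assumption filled in from context. The helper itself is hypothetical; the real code lives in intel_pmu_set_msr()'s else-if chain.

/* Hypothetical helper illustrating the write paths matched above. */
static int intel_pmu_write_pmc_msr(struct kvm_pmu *pmu, u32 msr, u64 data)
{
	struct kvm_pmc *pmc;
	u64 reserved_bits;

	if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
	    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
		/* Apply the write as a delta so the backing perf event's
		 * accumulated count is preserved. */
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	} else if ((pmc = get_fixed_pmc(pmu, msr))) {
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
		if (data == pmc->eventsel)
			return 0;

		reserved_bits = pmu->reserved_bits;
		/* Assumption: PMC2 may also accept HSW_IN_TX_CHECKPOINTED. */
		if ((pmc->idx == 2) &&
		    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
			reserved_bits ^= HSW_IN_TX_CHECKPOINTED;

		if (data & reserved_bits)
			return 1;	/* reserved bits set */

		pmc->eventsel = data;
		reprogram_counter(pmc);
		return 0;
	}

	return 1;	/* not a PMC MSR */
}
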
493 struct kvm_pmc *pmc; in setup_fixed_pmc_eventsel() local
498 pmc = &pmu->fixed_counters[i]; in setup_fixed_pmc_eventsel()
500 pmc->eventsel = (intel_arch_events[event].unit_mask << 8) | in setup_fixed_pmc_eventsel()
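
setup_fixed_pmc_eventsel() gives each fixed counter a synthetic eventsel built from the architectural event it is hard-wired to, so the common reprogramming code can treat fixed and GP counters uniformly. Sketch, assuming a fixed_pmc_events[] table that maps a fixed-counter index to an intel_arch_events[] index (the table name is an assumption):

static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc;
	u32 event;
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		pmc = &pmu->fixed_counters[i];
		event = fixed_pmc_events[i];

		/* unit_mask in bits 15:8, event select in bits 7:0. */
		pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
				intel_arch_events[event].eventsel;
	}
}
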
643 struct kvm_pmc *pmc = NULL; in intel_pmu_reset() local
647 pmc = &pmu->gp_counters[i]; in intel_pmu_reset()
649 pmc_stop_counter(pmc); in intel_pmu_reset()
650 pmc->counter = pmc->eventsel = 0; in intel_pmu_reset()
654 pmc = &pmu->fixed_counters[i]; in intel_pmu_reset()
656 pmc_stop_counter(pmc); in intel_pmu_reset()
657 pmc->counter = 0; in intel_pmu_reset()
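
intel_pmu_reset() walks both counter arrays, stopping the backing perf event and zeroing state; GP counters also clear eventsel, while fixed counters only clear the count (their eventsel is the fixed mapping set up above). Sketch, with the loop bounds taken as the architectural maxima (an assumption; the bounds are not in the listing):

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}
}
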
778 struct kvm_pmc *pmc = NULL; in intel_pmu_cross_mapped_check() local
783 pmc = intel_pmc_idx_to_pmc(pmu, bit); in intel_pmu_cross_mapped_check()
785 if (!pmc || !pmc_speculative_in_use(pmc) || in intel_pmu_cross_mapped_check()
786 !intel_pmc_is_enabled(pmc) || !pmc->perf_event) in intel_pmu_cross_mapped_check()
793 hw_idx = pmc->perf_event->hw.idx; in intel_pmu_cross_mapped_check()
794 if (hw_idx != pmc->idx && hw_idx > -1) in intel_pmu_cross_mapped_check()
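
intel_pmu_cross_mapped_check() scans the enabled guest counters and records the ones whose backing host perf event was scheduled on a different physical counter than the guest-visible index (hw.idx valid but not equal to pmc->idx). Sketch of that loop; iterating over global_ctrl and accumulating into host_cross_mapped_mask are assumptions filled in from context:

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc = NULL;
	int bit, hw_idx;

	for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
			 X86_PMC_IDX_MAX) {
		pmc = intel_pmc_idx_to_pmc(pmu, bit);

		if (!pmc || !pmc_speculative_in_use(pmc) ||
		    !intel_pmc_is_enabled(pmc) || !pmc->perf_event)
			continue;

		/*
		 * A negative hw.idx means the event is not currently
		 * scheduled on any physical counter in the host.
		 */
		hw_idx = pmc->perf_event->hw.idx;
		if (hw_idx != pmc->idx && hw_idx > -1)
			pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
	}
}
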