Lines Matching full:pmc

35 /* mapping between fixed pmc index and intel_arch_events array */
45 struct kvm_pmc *pmc; in reprogram_fixed_counters() local
47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
53 reprogram_fixed_counter(pmc, new_ctrl, i); in reprogram_fixed_counters()
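
The first match (line 35) is the comment over the table that maps each fixed counter index to its slot in intel_arch_events[]; the remaining matches come from reprogram_fixed_counters(), which walks the fixed counters whenever the guest rewrites MSR_CORE_PERF_FIXED_CTR_CTRL and reprograms only those whose control field changed. A minimal sketch of how the matched lines fit together; the fixed_ctrl_field() helper, the old/new comparison, and the final bookkeeping are assumptions filled in around the matches, not quoted from the file:

    static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
    {
            int i;

            for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                    /* assumed helper: the 4-bit control field for counter i */
                    u8 new_ctrl = fixed_ctrl_field(data, i);
                    u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
                    struct kvm_pmc *pmc;

                    pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                    if (old_ctrl == new_ctrl)
                            continue;

                    reprogram_fixed_counter(pmc, new_ctrl, i);
            }

            pmu->fixed_ctr_ctrl = data;
    }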
101 /* check if a PMC is enabled by comparing it with global_ctrl bits. */
102 static bool intel_pmc_is_enabled(struct kvm_pmc *pmc) in intel_pmc_is_enabled() argument
104 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_pmc_is_enabled()
106 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); in intel_pmc_is_enabled()
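
The three matches above are the entire body of intel_pmc_is_enabled(), reassembled here for readability: a counter only counts while its bit is set in the guest's IA32_PERF_GLOBAL_CTRL image, and pmc->idx doubles as the bit position in that register.

    static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
    {
            struct kvm_pmu *pmu = pmc_to_pmu(pmc);

            /* pmc->idx is also the counter's bit position in GLOBAL_CTRL */
            return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
    }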
237 struct kvm_pmc *pmc; in intel_msr_idx_to_pmc() local
239 pmc = get_fixed_pmc(pmu, msr); in intel_msr_idx_to_pmc()
240 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0); in intel_msr_idx_to_pmc()
241 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0); in intel_msr_idx_to_pmc()
243 return pmc; in intel_msr_idx_to_pmc()
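
intel_msr_idx_to_pmc() resolves an MSR index to its counter by probing each address range in turn: fixed counters first, then the legacy MSR_P6_EVNTSEL0 selector range, then the MSR_IA32_PERFCTR0 counter range. The body below is just the matched lines reassembled; the signature and the vcpu_to_pmu() line are assumptions:

    static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
    {
            struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
            struct kvm_pmc *pmc;

            /* try each MSR range; the first hit wins */
            pmc = get_fixed_pmc(pmu, msr);
            pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
            pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

            return pmc;
    }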
354 struct kvm_pmc *pmc; in intel_pmu_get_msr() local
371 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_get_msr()
372 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_get_msr()
373 u64 val = pmc_read_counter(pmc); in intel_pmu_get_msr()
377 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
378 u64 val = pmc_read_counter(pmc); in intel_pmu_get_msr()
382 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
383 msr_info->data = pmc->eventsel; in intel_pmu_get_msr()
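
The same three lookups gate the read path in intel_pmu_get_msr(). Counter reads go through pmc_read_counter() so a live perf_event is sampled rather than returning the stale cached count, while selector reads simply return the cached eventsel. A sketch of the tail of the function (msr here stands for msr_info->index); the masking against pmu->counter_bitmask[] and the return values are assumptions around the matched lines:

    if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
        (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
            u64 val = pmc_read_counter(pmc);

            /* assumed: clamp to the counter width advertised to the guest */
            msr_info->data = val & pmu->counter_bitmask[KVM_PMC_GP];
            return 0;
    } else if ((pmc = get_fixed_pmc(pmu, msr))) {
            u64 val = pmc_read_counter(pmc);

            msr_info->data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
            return 0;
    } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
            msr_info->data = pmc->eventsel;
            return 0;
    }
    return 1;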
395 struct kvm_pmc *pmc; in intel_pmu_set_msr() local
431 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_set_msr()
432 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_set_msr()
439 pmc->counter += data - pmc_read_counter(pmc); in intel_pmu_set_msr()
440 if (pmc->perf_event && !pmc->is_paused) in intel_pmu_set_msr()
441 perf_event_period(pmc->perf_event, in intel_pmu_set_msr()
442 get_sample_period(pmc, data)); in intel_pmu_set_msr()
444 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_set_msr()
445 pmc->counter += data - pmc_read_counter(pmc); in intel_pmu_set_msr()
446 if (pmc->perf_event && !pmc->is_paused) in intel_pmu_set_msr()
447 perf_event_period(pmc->perf_event, in intel_pmu_set_msr()
448 get_sample_period(pmc, data)); in intel_pmu_set_msr()
450 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_set_msr()
451 if (data == pmc->eventsel) in intel_pmu_set_msr()
454 reprogram_gp_counter(pmc, data); in intel_pmu_set_msr()
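
The write path in intel_pmu_set_msr() never stores the guest value into pmc->counter directly: it accumulates the delta against the current readout, so a running perf_event keeps its base, then re-arms the host event via perf_event_period() with the sample period implied by the new count (skipped while the event is paused). A selector write that actually changes eventsel funnels into reprogram_gp_counter(), which recreates the backing event. Sketch of the GP branches; the fixed-counter branch (matched lines 444-448) is identical minus the sign-extension, and both the sign-extension of legacy 32-bit counter writes and the reserved-bits check are assumptions, not quoted:

    if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
        (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
            /* assumed: legacy MSR_IA32_PERFCTRx writes are sign-extended */
            if (!msr_info->host_initiated && !(msr & MSR_PMC_FULL_WIDTH_BIT))
                    data = (s64)(s32)data;

            /* shift the counter by the delta instead of overwriting it */
            pmc->counter += data - pmc_read_counter(pmc);
            if (pmc->perf_event && !pmc->is_paused)
                    perf_event_period(pmc->perf_event,
                                      get_sample_period(pmc, data));
            return 0;
    } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
            if (data == pmc->eventsel)
                    return 0;
            /* assumed: reserved bits must be clear before reprogramming */
            if (!(data & pmu->reserved_bits)) {
                    reprogram_gp_counter(pmc, data);
                    return 0;
            }
    }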
574 struct kvm_pmc *pmc = NULL; in intel_pmu_reset() local
578 pmc = &pmu->gp_counters[i]; in intel_pmu_reset()
580 pmc_stop_counter(pmc); in intel_pmu_reset()
581 pmc->counter = pmc->eventsel = 0; in intel_pmu_reset()
585 pmc = &pmu->fixed_counters[i]; in intel_pmu_reset()
587 pmc_stop_counter(pmc); in intel_pmu_reset()
588 pmc->counter = 0; in intel_pmu_reset()
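
Finally, intel_pmu_reset() runs on vCPU reset: each GP counter is stopped and has both its count and its event selector cleared, while the fixed counters only clear the count, since their configuration lives centrally in FIXED_CTR_CTRL rather than in a per-counter eventsel. The loop bounds and the trailing global-MSR cleanup are assumptions:

    static void intel_pmu_reset(struct kvm_vcpu *vcpu)
    {
            struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
            struct kvm_pmc *pmc = NULL;
            int i;

            for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {   /* assumed bound */
                    pmc = &pmu->gp_counters[i];

                    pmc_stop_counter(pmc);
                    pmc->counter = pmc->eventsel = 0;
            }

            for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {     /* assumed bound */
                    pmc = &pmu->fixed_counters[i];

                    pmc_stop_counter(pmc);
                    pmc->counter = 0;
            }

            /* assumed: the global status/control MSR images are cleared too */
            pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
    }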