Lines Matching full:pmc
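
These matches are from KVM's arm64 PMU emulation: each guest counter is backed by a host perf event, and an even/odd pair of counters can be chained together to act as a single 64-bit counter.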

19 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
49 static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc) in kvm_pmc_to_vcpu() argument
54 pmc -= pmc->idx; in kvm_pmc_to_vcpu()
55 pmu = container_of(pmc, struct kvm_pmu, pmc[0]); in kvm_pmc_to_vcpu()
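
kvm_pmc_to_vcpu() climbs from a counter back to its vcpu: subtracting pmc->idx rewinds the pointer to pmc[0], and container_of() then steps out to the enclosing structure. A minimal user-space sketch of the same idiom, using simplified stand-in structs rather than the kernel types (the kernel repeats the container_of() step twice more to reach the vcpu):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pmc { int idx; };
    struct pmu { struct pmc pmc[4]; };

    static struct pmu *pmc_to_pmu(struct pmc *pmc)
    {
            pmc -= pmc->idx;        /* rewind to &pmu->pmc[0] */
            return container_of(pmc, struct pmu, pmc[0]);
    }

    int main(void)
    {
            struct pmu pmu;

            for (int i = 0; i < 4; i++)
                    pmu.pmc[i].idx = i;

            /* Recover the pmu from an arbitrary member, here pmc[2]. */
            printf("match: %d\n", pmc_to_pmu(&pmu.pmc[2]) == &pmu);
            return 0;
    }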
61 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
62 * @pmc: The PMU counter pointer
64 static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc) in kvm_pmu_pmc_is_chained() argument
66 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_pmc_is_chained()
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
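
Counters chain in even/odd pairs, so both members of a pair map to the same bit of the chained bitmap via pmc->idx >> 1. A sketch of that indexing, with a plain unsigned long standing in for the kernel's bitmap and test_bit():

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long chained;   /* stands in for vcpu->arch.pmu.chained */

    /* Both halves of an even/odd counter pair share bit idx >> 1. */
    static bool pmc_is_chained(int idx)
    {
            return chained & (1UL << (idx >> 1));
    }

    int main(void)
    {
            chained |= 1UL << (2 >> 1);          /* chain the pair {2, 3} */
            printf("%d %d %d\n", pmc_is_chained(2), pmc_is_chained(3),
                   pmc_is_chained(4));           /* prints: 1 1 0 */
            return 0;
    }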
81 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
82 * @pmc: The PMU counter pointer
87 static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc) in kvm_pmu_get_canonical_pmc() argument
89 if (kvm_pmu_pmc_is_chained(pmc) && in kvm_pmu_get_canonical_pmc()
90 kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_canonical_pmc()
91 return pmc - 1; in kvm_pmu_get_canonical_pmc()
93 return pmc; in kvm_pmu_get_canonical_pmc()
95 static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc) in kvm_pmu_get_alternate_pmc() argument
97 if (kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_alternate_pmc()
98 return pmc - 1; in kvm_pmu_get_alternate_pmc()
100 return pmc + 1; in kvm_pmu_get_alternate_pmc()
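
In a chained pair, the even-numbered (low) counter is the canonical one that owns the shared state; kvm_pmu_get_canonical_pmc() steps down from a high counter only when the pair is actually chained, while kvm_pmu_get_alternate_pmc() always returns the other half. A sketch, assuming the even/odd pairing implied by the idx arithmetic above:

    #include <stdbool.h>
    #include <stdio.h>

    struct pmc { int idx; };

    /* The even (low) half owns the state of a chained pair. */
    static struct pmc *get_canonical(struct pmc *pmc, bool chained)
    {
            if (chained && (pmc->idx & 1))
                    return pmc - 1;
            return pmc;
    }

    /* The other half of the pair, chained or not. */
    static struct pmc *get_alternate(struct pmc *pmc)
    {
            return (pmc->idx & 1) ? pmc - 1 : pmc + 1;
    }

    int main(void)
    {
            struct pmc pair[2] = { { .idx = 2 }, { .idx = 3 } };

            printf("canonical of 3 (chained): %d\n",
                   get_canonical(&pair[1], true)->idx);   /* 2 */
            printf("alternate of 2: %d\n",
                   get_alternate(&pair[0])->idx);         /* 3 */
            return 0;
    }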
126 * @pmc: The PMU counter pointer
129 struct kvm_pmc *pmc) in kvm_pmu_get_pair_counter_value() argument
133 if (kvm_pmu_pmc_is_chained(pmc)) { in kvm_pmu_get_pair_counter_value()
134 pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_get_pair_counter_value()
135 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
142 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_get_pair_counter_value()
143 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
151 if (pmc->perf_event) in kvm_pmu_get_pair_counter_value()
152 counter += perf_event_read_value(pmc->perf_event, &enabled, in kvm_pmu_get_pair_counter_value()
167 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; in kvm_pmu_get_counter_value() local
169 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_get_counter_value()
171 if (kvm_pmu_pmc_is_chained(pmc) && in kvm_pmu_get_counter_value()
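
kvm_pmu_get_pair_counter_value() reassembles the saved register value (both 32-bit halves for a chained pair) and adds whatever the backing perf event has counted since; kvm_pmu_get_counter_value() then returns the half the guest asked for, with the odd (high) counter taking the upper 32 bits. The cycle counter is exempt and keeps all 64 bits. A sketch of the half selection, assuming the combined 64-bit value is already in hand:

    #include <stdint.h>
    #include <stdio.h>

    /* Return the guest-visible half of a chained pair's 64-bit value. */
    static uint64_t counter_half(uint64_t pair_value, int idx)
    {
            if (idx & 1)                            /* odd = high counter */
                    return pair_value >> 32;
            return pair_value & 0xffffffffULL;      /* even = low counter */
    }

    int main(void)
    {
            uint64_t v = 0x0000000500000009ULL;

            printf("low %llu, high %llu\n",
                   (unsigned long long)counter_half(v, 2),    /* 9 */
                   (unsigned long long)counter_half(v, 3));   /* 5 */
            return 0;
    }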
200 * @pmc: The PMU counter pointer
202 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) in kvm_pmu_release_perf_event() argument
204 pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_release_perf_event()
205 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
206 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
207 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
208 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
214 * @pmc: The PMU counter pointer
218 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) in kvm_pmu_stop_counter() argument
222 pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_stop_counter()
223 if (!pmc->perf_event) in kvm_pmu_stop_counter()
226 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_stop_counter()
228 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) { in kvm_pmu_stop_counter()
232 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_stop_counter()
238 if (kvm_pmu_pmc_is_chained(pmc)) in kvm_pmu_stop_counter()
241 kvm_pmu_release_perf_event(pmc); in kvm_pmu_stop_counter()
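
kvm_pmu_stop_counter() takes a final reading and writes it back into the guest's register file before the perf event is released; for a chained pair, the 64-bit value is split across the even and odd PMEVCNTR registers. A sketch of that write-back, with a plain array standing in for the vcpu sys-reg file:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pmevcntr[32];   /* stand-in for the vcpu sys regs */

    /* Save a chained pair's 64-bit count back as two 32-bit halves. */
    static void save_chained(int low_idx, uint64_t counter)
    {
            pmevcntr[low_idx]     = counter & 0xffffffffULL;  /* low half  */
            pmevcntr[low_idx + 1] = counter >> 32;            /* high half */
    }

    int main(void)
    {
            save_chained(2, 0x0000000500000009ULL);
            printf("PMEVCNTR2=%llu PMEVCNTR3=%llu\n",
                   (unsigned long long)pmevcntr[2],
                   (unsigned long long)pmevcntr[3]);   /* 9 and 5 */
            return 0;
    }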
255 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
286 kvm_pmu_release_perf_event(&pmu->pmc[i]); in kvm_pmu_vcpu_destroy()
312 struct kvm_pmc *pmc; in kvm_pmu_enable_counter_mask() local
321 pmc = &pmu->pmc[i]; in kvm_pmu_enable_counter_mask()
327 /* At this point, pmc must be the canonical */ in kvm_pmu_enable_counter_mask()
328 if (pmc->perf_event) { in kvm_pmu_enable_counter_mask()
329 perf_event_enable(pmc->perf_event); in kvm_pmu_enable_counter_mask()
330 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) in kvm_pmu_enable_counter_mask()
347 struct kvm_pmc *pmc; in kvm_pmu_disable_counter_mask() local
356 pmc = &pmu->pmc[i]; in kvm_pmu_disable_counter_mask()
362 /* At this point, pmc must be the canonical */ in kvm_pmu_disable_counter_mask()
363 if (pmc->perf_event) in kvm_pmu_disable_counter_mask()
364 perf_event_disable(pmc->perf_event); in kvm_pmu_disable_counter_mask()
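
The enable/disable paths walk the counter mask the guest wrote and flip the backing perf event for each set bit; as the "must be the canonical" comments note, a chained high counter is first redirected to its canonical half. A sketch of the bit walk only:

    #include <stdint.h>
    #include <stdio.h>

    /* Visit every counter whose bit is set in the guest's mask. */
    static void for_each_counter(uint64_t mask, void (*fn)(int idx))
    {
            for (int i = 0; i < 32; i++)
                    if (mask & (1ULL << i))
                            fn(i);
    }

    static void enable_one(int idx)
    {
            printf("enable counter %d\n", idx);
    }

    int main(void)
    {
            for_each_counter(0x9, enable_one);   /* counters 0 and 3 */
            return 0;
    }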
463 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
475 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_pmu_perf_overflow() local
477 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_perf_overflow()
478 int idx = pmc->idx; in kvm_pmu_perf_overflow()
489 if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_perf_overflow()
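
The overflow callback latches the counter's bit in PMOVSSET_EL0 so the guest observes the overflow; the kvm_pmu_idx_is_64bit() check above truncates the updated value back to 32 bits for counters that should wrap like 32-bit hardware. A sketch of that wrap-and-flag step, with a local variable standing in for the sys reg:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pmovsset;   /* stand-in for PMOVSSET_EL0 */

    /* On overflow: wrap a 32-bit counter and latch its overflow bit. */
    static uint64_t handle_overflow(uint64_t reg, int idx, int is_64bit)
    {
            if (!is_64bit)
                    reg &= 0xffffffffULL;   /* wrap like a 32-bit counter */
            pmovsset |= 1ULL << idx;        /* guest sees the overflow */
            return reg;
    }

    int main(void)
    {
            uint64_t reg = handle_overflow(0x100000002ULL, 4, 0);

            printf("reg=%llu ovs=%#llx\n",
                   (unsigned long long)reg,
                   (unsigned long long)pmovsset);   /* reg=2 ovs=0x10 */
            return 0;
    }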
546 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { in kvm_pmu_software_increment()
600 struct kvm_pmc *pmc; in kvm_pmu_create_perf_event() local
610 pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]); in kvm_pmu_create_perf_event()
612 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
613 ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx; in kvm_pmu_create_perf_event()
616 kvm_pmu_stop_counter(vcpu, pmc); in kvm_pmu_create_perf_event()
617 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
638 attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); in kvm_pmu_create_perf_event()
645 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_create_perf_event()
647 if (kvm_pmu_pmc_is_chained(pmc)) { in kvm_pmu_create_perf_event()
658 pmc + 1); in kvm_pmu_create_perf_event()
661 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_create_perf_event()
667 kvm_pmu_perf_overflow, pmc); in kvm_pmu_create_perf_event()
676 pmc->perf_event = event; in kvm_pmu_create_perf_event()
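
kvm_pmu_create_perf_event() translates the guest's programming into a struct perf_event_attr. The non-obvious part is the period: a counter currently holding V overflows after 2^32 - V events (2^64 - V when 64-bit), i.e. the two's complement of V truncated to the counter width, which is what gets assigned to attr.sample_period. A sketch of just that computation:

    #include <stdint.h>
    #include <stdio.h>

    /* Events remaining until a counter holding 'counter' overflows. */
    static uint64_t sample_period(uint64_t counter, int is_64bit)
    {
            uint64_t period = -counter;          /* two's complement */

            if (!is_64bit)
                    period &= 0xffffffffULL;     /* 32-bit counter space */
            return period;
    }

    int main(void)
    {
            /* A 32-bit counter seeded at 0xfffffff0 overflows in 16 events. */
            printf("%llu\n",
                   (unsigned long long)sample_period(0xfffffff0ULL, 0));
            return 0;
    }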
690 struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc; in kvm_pmu_update_pmc_chained() local
693 old_state = kvm_pmu_pmc_is_chained(pmc); in kvm_pmu_update_pmc_chained()
694 new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) && in kvm_pmu_update_pmc_chained()
695 kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1); in kvm_pmu_update_pmc_chained()
700 canonical_pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_update_pmc_chained()
707 kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc)); in kvm_pmu_update_pmc_chained()
708 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
711 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
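
kvm_pmu_update_pmc_chained() recomputes whether a pair should be chained: the guest must have programmed the CHAIN event type and enabled the odd counter (pmc->idx | 0x1). Nothing happens unless the state flips; on a flip the old perf events are stopped before the pair bit is updated. A control-flow sketch under those assumptions:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long chained;   /* pair bitmap, as in the sketch above */

    /* Recompute the chained state of the pair containing counter idx. */
    static void update_chained(int idx, bool chain_evtype, bool high_enabled)
    {
            bool old_state = chained & (1UL << (idx >> 1));
            bool new_state = chain_evtype && high_enabled;

            if (old_state == new_state)
                    return;

            /* The real code stops both halves' perf events here. */
            if (new_state)
                    chained |= 1UL << (idx >> 1);
            else
                    chained &= ~(1UL << (idx >> 1));
            printf("pair %d now %s\n", idx >> 1,
                   new_state ? "chained" : "split");
    }

    int main(void)
    {
            update_chained(2, true, true);    /* pair 1 now chained */
            update_chained(2, true, false);   /* pair 1 now split */
            return 0;
    }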
720 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
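
The truncated comment introduces kvm_pmu_set_counter_event_type(): a guest write to PMXEVTYPER_EL0 picks the hardware event a counter should count, and KVM responds by recreating the backing perf event. A sketch of the kind of field extraction such a handler performs; the mask values here are illustrative stand-ins, since the exact event-field width differs across PMU versions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the PMEVTYPER field masks. */
    #define EVTYPE_EVENT 0xffffULL    /* event number field (width varies) */
    #define EVTYPE_P     (1ULL << 31) /* privileged-filter bit */

    int main(void)
    {
            uint64_t data = 0x80000011ULL;   /* guest write: P set, event 0x11 */

            printf("event %#llx, EL1 filtered: %s\n",
                   (unsigned long long)(data & EVTYPE_EVENT),
                   (data & EVTYPE_P) ? "yes" : "no");
            return 0;
    }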