Lines Matching full:pmc
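
These are the hits for the identifier pmc in the arm64 KVM PMU emulation code (the chained-counter implementation, virt/kvm/arm/pmu.c in kernels of that era). Brief C sketches of the recurring patterns are interleaved below; each is a userspace reconstruction with stand-in types and names, not the kernel's own code.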

25 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
60 static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc) in kvm_pmc_to_vcpu() argument
65 pmc -= pmc->idx; in kvm_pmc_to_vcpu()
66 pmu = container_of(pmc, struct kvm_pmu, pmc[0]); in kvm_pmc_to_vcpu()
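
The two lines above are the whole trick behind kvm_pmc_to_vcpu(): each counter records its own index, so rewinding the pointer by that index lands on element 0 of the embedded array, and container_of() then recovers the enclosing pmu (and from there the vcpu). A minimal userspace sketch of the same pattern, with struct pmc/struct pmu standing in for the kernel's kvm_pmc/kvm_pmu:

        #include <stddef.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct pmc { int idx; };
        struct pmu { struct pmc pmc[4]; };

        static struct pmu *pmc_to_pmu(struct pmc *pmc)
        {
                pmc -= pmc->idx;        /* rewind to &pmu->pmc[0] */
                return container_of(pmc, struct pmu, pmc[0]);
        }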
72 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
73 * @pmc: The PMU counter pointer
75 static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc) in kvm_pmu_pmc_is_chained() argument
77 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_pmc_is_chained()
79 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
92 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
93 * @pmc: The PMU counter pointer
98 static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc) in kvm_pmu_get_canonical_pmc() argument
100 if (kvm_pmu_pmc_is_chained(pmc) && in kvm_pmu_get_canonical_pmc()
101 kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_canonical_pmc()
102 return pmc - 1; in kvm_pmu_get_canonical_pmc()
104 return pmc; in kvm_pmu_get_canonical_pmc()
106 static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc) in kvm_pmu_get_alternate_pmc() argument
108 if (kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_alternate_pmc()
109 return pmc - 1; in kvm_pmu_get_alternate_pmc()
111 return pmc + 1; in kvm_pmu_get_alternate_pmc()
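
Together these two helpers encode the pairing scheme: chained counters come in even/odd pairs, the even (low) counter is canonical and owns the perf event, and the alternate is simply the other half of the pair. The index arithmetic reduces to bit operations; a sketch, assuming the kernel's even-low/odd-high convention:

        /* Pair membership is idx >> 1; high/low is the lowest bit.  Only a
         * chained counter folds onto its canonical (even) partner; the
         * alternate is always the other half of the pair. */
        static int pair_of(int idx)      { return idx >> 1; }
        static int is_high(int idx)      { return idx & 1; }
        static int canonical_of(int idx, int chained)
        {
                return (chained && is_high(idx)) ? idx - 1 : idx;
        }
        static int alternate_of(int idx) { return idx ^ 1; }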
137 * @pmc: The PMU counter pointer
140 struct kvm_pmc *pmc) in kvm_pmu_get_pair_counter_value() argument
144 if (kvm_pmu_pmc_is_chained(pmc)) { in kvm_pmu_get_pair_counter_value()
145 pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_get_pair_counter_value()
146 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
153 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_get_pair_counter_value()
154 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
162 if (pmc->perf_event) in kvm_pmu_get_pair_counter_value()
163 counter += perf_event_read_value(pmc->perf_event, &enabled, in kvm_pmu_get_pair_counter_value()
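
For a chained pair, the saved guest state is two 32-bit PMEVCNTR values that together form one 64-bit count, and any live delta from the backing perf event is added on top via perf_event_read_value(). A standalone sketch of the assembly, with saved[] standing in for the vcpu's sysreg file:

        #include <stdint.h>

        static uint64_t pair_counter_value(const uint32_t *saved, int even_idx,
                                           uint64_t live_delta)
        {
                uint64_t lo = saved[even_idx];          /* low half of the pair  */
                uint64_t hi = saved[even_idx + 1];      /* high half of the pair */

                return (lo | (hi << 32)) + live_delta;
        }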
178 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; in kvm_pmu_get_counter_value() local
183 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_get_counter_value()
185 if (kvm_pmu_pmc_is_chained(pmc) && in kvm_pmu_get_counter_value()
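
kvm_pmu_get_counter_value() then narrows that pair value to what the guest asked for: a read of the odd (high) half of a chained pair returns the upper 32 bits, while an ordinary 32-bit counter returns the lower half (a counter that kvm_pmu_idx_is_64bit() reports as truly 64-bit is left whole). A sketch of the selection:

        #include <stdint.h>

        static uint64_t visible_value(uint64_t pair, int idx, int chained)
        {
                if (chained && (idx & 1))
                        return pair >> 32;      /* guest reads the high counter */
                return (uint32_t)pair;          /* 32-bit view of the low half  */
        }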
217 * @pmc: The PMU counter pointer
219 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) in kvm_pmu_release_perf_event() argument
221 pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_release_perf_event()
222 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
223 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
224 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
225 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
231 * @pmc: The PMU counter pointer
235 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc) in kvm_pmu_stop_counter() argument
239 pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_stop_counter()
240 if (!pmc->perf_event) in kvm_pmu_stop_counter()
243 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_stop_counter()
245 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) { in kvm_pmu_stop_counter()
249 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_stop_counter()
255 if (kvm_pmu_pmc_is_chained(pmc)) in kvm_pmu_stop_counter()
258 kvm_pmu_release_perf_event(pmc); in kvm_pmu_stop_counter()
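
Stopping a counter is therefore a snapshot: the combined value is read back while the perf event still exists, written into the guest-visible register(s), split across both 32-bit halves if the pair is chained, and only then is the event released. A sketch of the write-back, using the same saved[] stand-in as above:

        #include <stdint.h>

        static void stop_counter_writeback(uint32_t *saved, int even_idx,
                                           uint64_t value, int chained)
        {
                saved[even_idx] = (uint32_t)value;                  /* low half  */
                if (chained)
                        saved[even_idx + 1] = (uint32_t)(value >> 32); /* high half */
        }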
272 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
287 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
303 kvm_pmu_release_perf_event(&pmu->pmc[i]); in kvm_pmu_vcpu_destroy()
329 struct kvm_pmc *pmc; in kvm_pmu_enable_counter_mask() local
341 pmc = &pmu->pmc[i]; in kvm_pmu_enable_counter_mask()
347 /* At this point, pmc must be the canonical */ in kvm_pmu_enable_counter_mask()
348 if (pmc->perf_event) { in kvm_pmu_enable_counter_mask()
349 perf_event_enable(pmc->perf_event); in kvm_pmu_enable_counter_mask()
350 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) in kvm_pmu_enable_counter_mask()
367 struct kvm_pmc *pmc; in kvm_pmu_disable_counter_mask() local
376 pmc = &pmu->pmc[i]; in kvm_pmu_disable_counter_mask()
382 /* At this point, pmc must be the canonical */ in kvm_pmu_disable_counter_mask()
383 if (pmc->perf_event) in kvm_pmu_disable_counter_mask()
384 perf_event_disable(pmc->perf_event); in kvm_pmu_disable_counter_mask()
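
Both mask functions share one shape: walk the set bits of the guest-written PMCNTENSET/PMCNTENCLR-style value and poke the backing perf event of each selected counter (the enable path also re-checks that the event actually went active). A sketch of the walk; the callback split is illustrative, not the kernel's structure:

        #include <stdint.h>

        static void for_each_selected_counter(uint32_t mask, void (*op)(int idx))
        {
                for (int i = 0; i < 32; i++)
                        if (mask & (UINT32_C(1) << i))
                                op(i);          /* enable or disable counter i */
        }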
482 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
494 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_pmu_perf_overflow() local
496 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_perf_overflow()
497 int idx = pmc->idx; in kvm_pmu_perf_overflow()
508 if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_perf_overflow()
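
The overflow path shows why every pmc knows its own index: the counter object is handed to perf as the overflow handler's context when the event is created, so the handler can climb back to the vcpu, set the matching PMOVSSET bit, and kick the guest. A standalone sketch of the context plumbing:

        #include <stdint.h>

        struct counter { int idx; uint32_t *ovs; };     /* ovs: PMOVSSET shadow */

        static void on_overflow(void *context)
        {
                struct counter *c = context;

                *c->ovs |= UINT32_C(1) << c->idx;       /* mark the overflow */
                /* ...then kick the vcpu so it sees the pending interrupt. */
        }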
568 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { in kvm_pmu_software_increment()
628 struct kvm_pmc *pmc; in kvm_pmu_create_perf_event() local
638 pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]); in kvm_pmu_create_perf_event()
640 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
641 ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx; in kvm_pmu_create_perf_event()
644 kvm_pmu_stop_counter(vcpu, pmc); in kvm_pmu_create_perf_event()
645 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
666 attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); in kvm_pmu_create_perf_event()
673 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_create_perf_event()
675 if (kvm_pmu_pmc_is_chained(pmc)) { in kvm_pmu_create_perf_event()
686 pmc + 1); in kvm_pmu_create_perf_event()
689 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_create_perf_event()
695 kvm_pmu_perf_overflow, pmc); in kvm_pmu_create_perf_event()
704 pmc->perf_event = event; in kvm_pmu_create_perf_event()
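
When the event is created, its sample period is chosen so the host counter fires exactly when the emulated counter would wrap: the period is the two's-complement distance from the current value to overflow (and note the chained case above registers the odd counter, pmc + 1, as handler context, since the pair signals overflow on its high half). A sketch of the period arithmetic, assuming a 32-bit counter:

        #include <stdint.h>

        /* Starting from value v, a 32-bit counter overflows after
         * 2^32 - v increments (for v != 0); that is exactly (uint32_t)-v. */
        static uint64_t period_to_overflow(uint32_t v)
        {
                return (uint32_t)-v;
        }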
718 struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc; in kvm_pmu_update_pmc_chained() local
721 old_state = kvm_pmu_pmc_is_chained(pmc); in kvm_pmu_update_pmc_chained()
722 new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) && in kvm_pmu_update_pmc_chained()
723 kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1); in kvm_pmu_update_pmc_chained()
728 canonical_pmc = kvm_pmu_get_canonical_pmc(pmc); in kvm_pmu_update_pmc_chained()
735 kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc)); in kvm_pmu_update_pmc_chained()
736 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
739 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
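
The chained-state transition is guarded: both counters of the pair are stopped first so no stale perf event spans the change, and then a single bit per pair, indexed by idx >> 1 to match kvm_pmu_pmc_is_chained() above, is set or cleared. A sketch of the bitmap update:

        #include <stdint.h>

        static void set_pair_chained(uint32_t *bitmap, int idx, int chained)
        {
                uint32_t bit = UINT32_C(1) << (idx >> 1);   /* one bit per pair */

                if (chained)
                        *bitmap |= bit;
                else
                        *bitmap &= ~bit;
        }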
748 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an