Lines matching full:pmc (arch/x86/kvm/pmu.c)

33 * - There are three types of index to access perf counters (PMC):
41 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
42 *      code. Each pmc, stored in kvm_pmc.idx field, is unique across
44 *      between pmc and perf counters is as the following:
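
The mapping the comment refers to (its remaining lines do not contain the token pmc and are elided above) places gp counters at global indices 0..n-1 and Intel fixed counters at INTEL_PMC_IDX_FIXED + i. A minimal, self-contained sketch of that layout; INTEL_PMC_IDX_FIXED == 32 matches the kernel header, the rest is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32	/* value from arch/x86/include/asm/perf_event.h */

/* Global pmc index model: gp counters occupy [0, nr_gp), fixed counters
 * occupy [INTEL_PMC_IDX_FIXED, INTEL_PMC_IDX_FIXED + nr_fixed). */
static bool idx_is_fixed(int pmc_idx)
{
	return pmc_idx >= INTEL_PMC_IDX_FIXED;
}

int main(void)
{
	int samples[] = { 0, 3, 32, 34 };

	for (int i = 0; i < 4; i++) {
		int idx = samples[i];

		printf("pmc idx %2d -> %s counter %d\n", idx,
		       idx_is_fixed(idx) ? "fixed" : "gp",
		       idx_is_fixed(idx) ? idx - INTEL_PMC_IDX_FIXED : idx);
	}
	return 0;
}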
62 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
63 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in kvm_perf_overflow()
65 if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { in kvm_perf_overflow()
66 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow()
67 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
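
kvm_perf_overflow() runs from perf's overflow callback; the test_and_set_bit() on line 65 makes it idempotent, so only the first overflow since the last reprogram marks global_status and raises KVM_REQ_PMU. A self-contained model of that claim-the-bit idiom, using C11 atomics in place of the kernel's bitops (all names below are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong reprogram_pmi;	/* one bit per counter, as in struct kvm_pmu */

/* Mirrors !test_and_set_bit(idx, reprogram_pmi): true only for the first
 * caller to set the bit; later overflows are swallowed until the event
 * handler clears it. */
static int claim_overflow(int idx)
{
	unsigned long bit = 1UL << idx;

	return !(atomic_fetch_or(&reprogram_pmi, bit) & bit);
}

int main(void)
{
	printf("first overflow claims the bit: %d\n", claim_overflow(3));	/* 1 */
	printf("repeat overflow is suppressed: %d\n", claim_overflow(3));	/* 0 */
	return 0;
}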
75 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow_intr() local
76 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in kvm_perf_overflow_intr()
78 if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { in kvm_perf_overflow_intr()
79 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow_intr()
80 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow_intr()
91 irq_work_queue(&pmc_to_pmu(pmc)->irq_work); in kvm_perf_overflow_intr()
93 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in kvm_perf_overflow_intr()
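
kvm_perf_overflow_intr() repeats the same claim-the-bit bookkeeping and additionally injects a PMI into the guest. Two delivery paths are visible above: line 91 defers through the pmu's irq_work, line 93 requests KVM_REQ_PMI on the vcpu directly. The condition choosing between them does not contain the token pmc and is elided; in the source it falls back to irq_work when the overflow NMI did not land in guest context, because waking the vcpu is not possible from NMI context.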
97 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, in pmc_reprogram_counter() argument
114 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
130 kvm_perf_overflow, pmc); in pmc_reprogram_counter()
132 pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", in pmc_reprogram_counter()
133 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
137 pmc->perf_event = event; in pmc_reprogram_counter()
138 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
139 clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); in pmc_reprogram_counter()
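
pmc_reprogram_counter() backs the virtual counter with a host perf_event, and line 114 picks the sample period so the host event overflows exactly when the guest counter would. A sketch of what get_sample_period() computes; the 48-bit width is an assumption for illustration (the kernel takes the width from pmc_bitmask(pmc)):

#include <stdint.h>
#include <stdio.h>

#define PMC_BITMASK ((1ULL << 48) - 1)	/* illustrative 48-bit counter */

/* Distance from the current guest counter value to overflow, masked to
 * the counter width; a zero distance means a full wrap of the counter. */
static uint64_t sample_period(uint64_t counter)
{
	uint64_t period = (-counter) & PMC_BITMASK;

	return period ? period : PMC_BITMASK + 1;
}

int main(void)
{
	/* Counter sits 10 ticks before overflow -> period of 10. */
	printf("period = %llu\n",
	       (unsigned long long)sample_period(PMC_BITMASK - 9));
	return 0;
}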
142 static void pmc_pause_counter(struct kvm_pmc *pmc) in pmc_pause_counter() argument
144 u64 counter = pmc->counter; in pmc_pause_counter()
146 if (!pmc->perf_event) in pmc_pause_counter()
150 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
151 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
154 static bool pmc_resume_counter(struct kvm_pmc *pmc) in pmc_resume_counter() argument
156 if (!pmc->perf_event) in pmc_resume_counter()
160 if (perf_event_period(pmc->perf_event, in pmc_resume_counter()
161 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
165 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
167 clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); in pmc_resume_counter()
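
pmc_pause_counter() and pmc_resume_counter() bracket every reprogram: pause folds the host event's accumulated count into the guest-visible counter and wraps it to the counter width (lines 150-151), and resume is the fast path that succeeds only when a perf_event already exists and perf_event_period() accepts the period recomputed from the updated counter, after which the event is simply re-enabled. A small model of the pause arithmetic, again assuming a 48-bit width, with paused_count standing in for perf_event_pause()'s return value:

#include <stdint.h>
#include <stdio.h>

#define PMC_BITMASK ((1ULL << 48) - 1)	/* illustrative 48-bit counter */

struct pmc_model {
	uint64_t counter;	/* guest-visible value, like kvm_pmc.counter */
};

/* Mirrors pmc_pause_counter(): accumulate what the host event counted
 * while running, then wrap to the architectural counter width. */
static void pause_counter(struct pmc_model *pmc, uint64_t paused_count)
{
	pmc->counter = (pmc->counter + paused_count) & PMC_BITMASK;
}

int main(void)
{
	struct pmc_model pmc = { .counter = PMC_BITMASK - 2 };

	pause_counter(&pmc, 5);	/* 3 ticks to overflow + 5 -> wraps to 2 */
	printf("counter after pause: %llu\n", (unsigned long long)pmc.counter);
	return 0;
}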
171 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) in reprogram_gp_counter() argument
175 struct kvm *kvm = pmc->vcpu->kvm; in reprogram_gp_counter()
183 pmc->eventsel = eventsel; in reprogram_gp_counter()
185 pmc_pause_counter(pmc); in reprogram_gp_counter()
187 if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc)) in reprogram_gp_counter()
214 config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc), in reprogram_gp_counter()
224 if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) in reprogram_gp_counter()
227 pmc_release_perf_event(pmc); in reprogram_gp_counter()
229 pmc->current_config = eventsel; in reprogram_gp_counter()
230 pmc_reprogram_counter(pmc, type, config, in reprogram_gp_counter()
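
Lines 224-230 show the reuse-or-recreate pattern: if the written eventsel is identical to the config the existing perf_event was created with and that event can be resumed in place, reprogramming stops there; otherwise the stale event is released, current_config is updated, and pmc_reprogram_counter() creates a fresh host event.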
239 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) in reprogram_fixed_counter() argument
244 struct kvm *kvm = pmc->vcpu->kvm; in reprogram_fixed_counter()
246 pmc_pause_counter(pmc); in reprogram_fixed_counter()
248 if (!en_field || !pmc_is_enabled(pmc)) in reprogram_fixed_counter()
261 if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc)) in reprogram_fixed_counter()
264 pmc_release_perf_event(pmc); in reprogram_fixed_counter()
266 pmc->current_config = (u64)ctrl; in reprogram_fixed_counter()
267 pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE, in reprogram_fixed_counter()
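
reprogram_fixed_counter() mirrors the gp path with the fixed-control field standing in for an eventsel: pause the counter (line 246), bail out if the enable field is clear or the counter is globally disabled (line 248), then apply the same reuse-or-recreate test keyed on ctrl widened to u64 (lines 261-267), always programming a PERF_TYPE_HARDWARE event.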
277 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx); in reprogram_counter() local
279 if (!pmc) in reprogram_counter()
282 if (pmc_is_gp(pmc)) in reprogram_counter()
283 reprogram_gp_counter(pmc, pmc->eventsel); in reprogram_counter()
288 reprogram_fixed_counter(pmc, ctrl, idx); in reprogram_counter()
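
reprogram_counter() is the common entry point keyed by the global index from the header comment: it resolves the index through the vendor pmu_ops table and redispatches by counter type, replaying the stored eventsel for gp counters (line 283) or the matching fixed-control field for fixed counters (line 288; the lines deriving ctrl and idx do not match pmc and are elided).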
299 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit); in kvm_pmu_handle_event() local
301 if (unlikely(!pmc || !pmc->perf_event)) { in kvm_pmu_handle_event()
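
kvm_pmu_handle_event() is the consumer of the KVM_REQ_PMU requests raised by the overflow callbacks: it walks the set bits of reprogram_pmi, clears any bit whose counter has lost its backing perf_event (the unlikely branch on line 301), and reprograms the remaining counters through reprogram_counter().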
362 struct kvm_pmc *pmc; in kvm_pmu_rdpmc() local
371 pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask); in kvm_pmu_rdpmc()
372 if (!pmc) in kvm_pmu_rdpmc()
380 *data = pmc_read_counter(pmc) & mask; in kvm_pmu_rdpmc()
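
kvm_pmu_rdpmc() services the guest's RDPMC instruction with the second index type from the header comment: the vendor rdpmc_ecx_to_pmc() callback translates the guest's ECX value and hands back the width mask that line 380 applies to the read-out, so the guest never observes more counter bits than its virtual PMU advertises.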
399 struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr); in kvm_pmu_mark_pmc_in_use() local
401 if (pmc) in kvm_pmu_mark_pmc_in_use()
402 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
445 static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc) in pmc_speculative_in_use() argument
447 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_speculative_in_use()
449 if (pmc_is_fixed(pmc)) in pmc_speculative_in_use()
451 pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3; in pmc_speculative_in_use()
453 return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE; in pmc_speculative_in_use()
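
Line 451 is the tail of a fixed_ctrl_field() lookup: the fixed-counter control MSR (MSR_CORE_PERF_FIXED_CTR_CTRL) packs one 4-bit field per fixed counter, and the low two bits of each field are the OS and USR enable bits, so masking with 0x3 asks whether the counter is enabled at any privilege level. A self-contained sketch of the extraction, with the control value supplied as a plain integer:

#include <stdint.h>
#include <stdio.h>

/* Each fixed counter owns a 4-bit field in FIXED_CTR_CTRL; bits 0-1 of
 * the field enable counting in OS/USR mode (mirrors fixed_ctrl_field()). */
static unsigned int fixed_ctrl_field(uint64_t ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

int main(void)
{
	uint64_t ctrl = 0x0b0;	/* fixed counter 1: OS+USR enable (0x3), PMI (0x8) */

	printf("fixed ctr 0 enable bits: %u\n", fixed_ctrl_field(ctrl, 0) & 0x3);	/* 0 */
	printf("fixed ctr 1 enable bits: %u\n", fixed_ctrl_field(ctrl, 1) & 0x3);	/* 3 */
	return 0;
}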
460 struct kvm_pmc *pmc = NULL; in kvm_pmu_cleanup() local
470 pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i); in kvm_pmu_cleanup()
472 if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc)) in kvm_pmu_cleanup()
473 pmc_stop_counter(pmc); in kvm_pmu_cleanup()
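
kvm_pmu_cleanup() is the lazy-release half of the bookkeeping fed by kvm_pmu_mark_pmc_in_use() (line 402): counters that still hold a backing perf_event but do not appear in pmc_in_use are stopped (line 473), while the pmc_speculative_in_use() check spares any counter whose guest-visible enable bits remain set, since the guest may count with it again at any moment.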