Lines Matching full:pmc
45 * - There are three types of index to access perf counters (PMC):
55 * 3. Global PMC Index (named pmc): pmc is an index specific to PMU
56 * code. Each pmc, stored in kvm_pmc.idx field, is unique across
58 * between pmc and perf counters is as the following:
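These matched lines appear to come from the KVM x86 PMU code (arch/x86/kvm/pmu.c). The comment fragment above describes the flat "global PMC index" kept in kvm_pmc.idx. As a rough illustration of that mapping, here is a minimal standalone sketch, not the kernel code: the constant 32 mirrors INTEL_PMC_IDX_FIXED, and every sketch_* name is made up for this example.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: on Intel, fixed counters start at flat index 32. */
#define SKETCH_IDX_FIXED 32

static bool sketch_idx_is_fixed(unsigned int idx)
{
	return idx >= SKETCH_IDX_FIXED;
}

int main(void)
{
	unsigned int examples[] = { 0, 3, 32, 33 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int idx = examples[i];

		if (sketch_idx_is_fixed(idx))
			printf("idx %u -> fixed counter %u\n",
			       idx, idx - SKETCH_IDX_FIXED);
		else
			printf("idx %u -> gp counter %u\n", idx, idx);
	}
	return 0;
}

With this convention a single integer names any counter, which is why the matches below key so much per-PMU state (reprogram_pmi, pebs_enable, global_status, pmc_in_use) by pmc->idx.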
86 static inline bool pmc_is_enabled(struct kvm_pmc *pmc) in pmc_is_enabled() argument
88 return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); in pmc_is_enabled()
99 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) in __kvm_perf_overflow() argument
101 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in __kvm_perf_overflow()
105 if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) in __kvm_perf_overflow()
108 if (pmc->perf_event && pmc->perf_event->attr.precise_ip) { in __kvm_perf_overflow()
123 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
125 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in __kvm_perf_overflow()
127 if (!pmc->intr || skip_pmi) in __kvm_perf_overflow()
138 if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu)) in __kvm_perf_overflow()
139 irq_work_queue(&pmc_to_pmu(pmc)->irq_work); in __kvm_perf_overflow()
141 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in __kvm_perf_overflow()
148 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
150 __kvm_perf_overflow(pmc, true); in kvm_perf_overflow()
153 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, in pmc_reprogram_counter() argument
157 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_reprogram_counter()
169 bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable); in pmc_reprogram_counter()
171 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
174 guest_cpuid_is_intel(pmc->vcpu)) { in pmc_reprogram_counter()
198 if (x86_match_cpu(vmx_icl_pebs_cpu) && pmc->idx == 32) in pmc_reprogram_counter()
203 kvm_perf_overflow, pmc); in pmc_reprogram_counter()
205 pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", in pmc_reprogram_counter()
206 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
210 pmc->perf_event = event; in pmc_reprogram_counter()
211 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
212 clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); in pmc_reprogram_counter()
213 pmc->is_paused = false; in pmc_reprogram_counter()
214 pmc->intr = intr || pebs; in pmc_reprogram_counter()
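In pmc_reprogram_counter() the sample period handed to perf (attr.sample_period above) is derived from the current virtual counter value, so the backing event fires when the guest-visible counter would overflow. A minimal userspace sketch of that period-until-overflow arithmetic, assuming the counter wraps at a width-dependent bitmask the way get_sample_period()/pmc_bitmask() model it; the sketch_* names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative: mask for a 48-bit wide counter. */
static const uint64_t sketch_bitmask = (1ULL << 48) - 1;

/*
 * Events left until the virtual counter overflows: the two's complement
 * of the current value, truncated to the counter width. Zero would mean
 * "a full wrap", i.e. bitmask + 1 events.
 */
static uint64_t sketch_sample_period(uint64_t counter)
{
	uint64_t period = (-counter) & sketch_bitmask;

	if (!period)
		period = sketch_bitmask + 1;
	return period;
}

int main(void)
{
	/* A counter 100 events away from wrapping needs a period of 100. */
	printf("%llu\n",
	       (unsigned long long)sketch_sample_period(sketch_bitmask - 99));
	return 0;
}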
217 static void pmc_pause_counter(struct kvm_pmc *pmc) in pmc_pause_counter() argument
219 u64 counter = pmc->counter; in pmc_pause_counter()
221 if (!pmc->perf_event || pmc->is_paused) in pmc_pause_counter()
225 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
226 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
227 pmc->is_paused = true; in pmc_pause_counter()
230 static bool pmc_resume_counter(struct kvm_pmc *pmc) in pmc_resume_counter() argument
232 if (!pmc->perf_event) in pmc_resume_counter()
236 if (perf_event_period(pmc->perf_event, in pmc_resume_counter()
237 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
240 if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) != in pmc_resume_counter()
241 (!!pmc->perf_event->attr.precise_ip)) in pmc_resume_counter()
245 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
246 pmc->is_paused = false; in pmc_resume_counter()
248 clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); in pmc_resume_counter()
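The pause/resume pair above carries the guest-visible count across reprogramming: pmc_pause_counter() folds whatever the backing perf_event accumulated into pmc->counter, truncated to the counter width, and pmc_resume_counter() only re-enables the event if the remaining period can be set and the PEBS configuration still matches the event's precise_ip setting. A minimal standalone sketch of the pause-side bookkeeping (sketch_* names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

static const uint64_t sketch_bitmask = (1ULL << 48) - 1; /* 48-bit counter */

struct sketch_pmc {
	uint64_t counter; /* guest-visible value */
	int paused;
};

/* Fold the delta counted by the backing event into the virtual counter. */
static void sketch_pause(struct sketch_pmc *pmc, uint64_t hw_delta)
{
	if (pmc->paused)
		return;
	pmc->counter = (pmc->counter + hw_delta) & sketch_bitmask;
	pmc->paused = 1;
}

int main(void)
{
	struct sketch_pmc pmc = { .counter = sketch_bitmask - 5, .paused = 0 };

	sketch_pause(&pmc, 10); /* wraps past the counter width */
	printf("%llu\n", (unsigned long long)pmc.counter); /* prints 4 */
	return 0;
}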
260 static bool check_pmu_event_filter(struct kvm_pmc *pmc) in check_pmu_event_filter() argument
263 struct kvm *kvm = pmc->vcpu->kvm; in check_pmu_event_filter()
268 if (!static_call(kvm_x86_pmu_hw_event_available)(pmc)) in check_pmu_event_filter()
275 if (pmc_is_gp(pmc)) { in check_pmu_event_filter()
276 key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB; in check_pmu_event_filter()
283 idx = pmc->idx - INTEL_PMC_IDX_FIXED; in check_pmu_event_filter()
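check_pmu_event_filter() treats the two counter types differently: a gp counter is matched by its programmed event select (masked down to the event/unit-mask bits, AMD64_RAW_EVENT_MASK_NB above), while a fixed counter is matched by its index relative to INTEL_PMC_IDX_FIXED against a bitmap of permitted fixed counters. A simplified standalone sketch of that split; the filter layout here is illustrative and not KVM's kvm_pmu_event_filter ABI:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_IDX_FIXED 32

struct sketch_filter {
	const uint64_t *allowed_events;	/* allowed gp event selects */
	unsigned int nevents;
	uint32_t fixed_counter_bitmap;	/* bit i => fixed counter i allowed */
};

static bool sketch_gp_allowed(const struct sketch_filter *f, uint64_t key)
{
	for (unsigned int i = 0; i < f->nevents; i++)
		if (f->allowed_events[i] == key)
			return true;
	return false;
}

static bool sketch_allowed(const struct sketch_filter *f,
			   unsigned int idx, uint64_t eventsel, bool is_gp)
{
	if (is_gp)
		return sketch_gp_allowed(f, eventsel);
	return f->fixed_counter_bitmap & (1u << (idx - SKETCH_IDX_FIXED));
}

int main(void)
{
	const uint64_t events[] = { 0xc0, 0x3c };    /* retired insns, cycles */
	struct sketch_filter f = { events, 2, 0x1 }; /* only fixed counter 0 */

	printf("gp 0xc0: %d\n", sketch_allowed(&f, 0, 0xc0, true));
	printf("fixed 1: %d\n",
	       sketch_allowed(&f, SKETCH_IDX_FIXED + 1, 0, false));
	return 0;
}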
296 void reprogram_counter(struct kvm_pmc *pmc) in reprogram_counter() argument
298 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in reprogram_counter()
299 u64 eventsel = pmc->eventsel; in reprogram_counter()
303 pmc_pause_counter(pmc); in reprogram_counter()
305 if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc)) in reprogram_counter()
308 if (!check_pmu_event_filter(pmc)) in reprogram_counter()
314 if (pmc_is_fixed(pmc)) { in reprogram_counter()
316 pmc->idx - INTEL_PMC_IDX_FIXED); in reprogram_counter()
326 if (pmc->current_config == new_config && pmc_resume_counter(pmc)) in reprogram_counter()
329 pmc_release_perf_event(pmc); in reprogram_counter()
331 pmc->current_config = new_config; in reprogram_counter()
332 pmc_reprogram_counter(pmc, PERF_TYPE_RAW, in reprogram_counter()
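The early return in reprogram_counter() is the cheap path: if the freshly computed config equals pmc->current_config and the paused perf_event can simply be resumed, nothing is destroyed; otherwise the old event is released and a new one created for the new config. A compressed standalone sketch of that decision (sketch_* names stand in for pmc_resume_counter()/pmc_release_perf_event()/pmc_reprogram_counter()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_counter {
	uint64_t current_config;
	bool has_event;
};

/* Trivial stand-ins for resume/release/create of the backing perf_event. */
static bool sketch_resume(struct sketch_counter *c)  { return c->has_event; }
static void sketch_release(struct sketch_counter *c) { c->has_event = false; }
static void sketch_create(struct sketch_counter *c)  { c->has_event = true; }

static void sketch_reprogram(struct sketch_counter *c, uint64_t new_config)
{
	/* Cheap path: same config and the paused event resumes cleanly. */
	if (c->current_config == new_config && sketch_resume(c)) {
		printf("resumed existing event\n");
		return;
	}

	/* Otherwise recreate the backing event with the new config. */
	sketch_release(c);
	c->current_config = new_config;
	sketch_create(c);
	printf("recreated event for config %#llx\n",
	       (unsigned long long)new_config);
}

int main(void)
{
	struct sketch_counter c = { .current_config = 0xc0, .has_event = true };

	sketch_reprogram(&c, 0xc0);	/* resumed */
	sketch_reprogram(&c, 0x3c);	/* recreated */
	return 0;
}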
346 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit); in kvm_pmu_handle_event() local
348 if (unlikely(!pmc || !pmc->perf_event)) { in kvm_pmu_handle_event()
352 reprogram_counter(pmc); in kvm_pmu_handle_event()
408 struct kvm_pmc *pmc; in kvm_pmu_rdpmc() local
417 pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask); in kvm_pmu_rdpmc()
418 if (!pmc) in kvm_pmu_rdpmc()
426 *data = pmc_read_counter(pmc) & mask; in kvm_pmu_rdpmc()
447 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr); in kvm_pmu_mark_pmc_in_use() local
449 if (pmc) in kvm_pmu_mark_pmc_in_use()
450 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
497 struct kvm_pmc *pmc = NULL; in kvm_pmu_cleanup() local
507 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); in kvm_pmu_cleanup()
509 if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc)) in kvm_pmu_cleanup()
510 pmc_stop_counter(pmc); in kvm_pmu_cleanup()
523 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) in kvm_pmu_incr_counter() argument
527 prev_count = pmc->counter; in kvm_pmu_incr_counter()
528 pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc); in kvm_pmu_incr_counter()
530 reprogram_counter(pmc); in kvm_pmu_incr_counter()
531 if (pmc->counter < prev_count) in kvm_pmu_incr_counter()
532 __kvm_perf_overflow(pmc, false); in kvm_pmu_incr_counter()
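kvm_pmu_incr_counter() is the software-increment path used when KVM itself emulates an event: bump the counter by one, truncate to the counter width, and treat the masked result wrapping below the previous value as an overflow. A minimal sketch of that wrap check (standalone, illustrative names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t sketch_bitmask = (1ULL << 48) - 1; /* 48-bit counter */

/* Increment by one; return true if the counter wrapped (overflowed). */
static bool sketch_incr(uint64_t *counter)
{
	uint64_t prev = *counter;

	*counter = (*counter + 1) & sketch_bitmask;
	return *counter < prev;
}

int main(void)
{
	uint64_t counter = sketch_bitmask; /* one increment from wrapping */
	bool overflowed = sketch_incr(&counter);

	printf("overflow: %d, counter: %llu\n",
	       overflowed, (unsigned long long)counter);
	return 0;
}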
535 static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, in eventsel_match_perf_hw_id() argument
538 return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) & in eventsel_match_perf_hw_id()
542 static inline bool cpl_is_matched(struct kvm_pmc *pmc) in cpl_is_matched() argument
545 u64 config = pmc->current_config; in cpl_is_matched()
547 if (pmc_is_gp(pmc)) { in cpl_is_matched()
555 return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user; in cpl_is_matched()
561 struct kvm_pmc *pmc; in kvm_pmu_trigger_event() local
565 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); in kvm_pmu_trigger_event()
567 if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc)) in kvm_pmu_trigger_event()
571 if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc)) in kvm_pmu_trigger_event()
572 kvm_pmu_incr_counter(pmc); in kvm_pmu_trigger_event()
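kvm_pmu_trigger_event() walks every counter and increments only those whose event select encodes the triggering hardware event and whose OS/USR filter matches the current guest CPL. The XOR-and-mask test in eventsel_match_perf_hw_id() means "the two configs agree on every bit covered by the mask". A standalone sketch of both checks, assuming the usual x86 event-select layout where bit 16 is the USR filter and bit 17 is the OS filter; the mask and names below are simplified for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* x86 PerfEvtSel privilege-filter bits. */
#define SKETCH_EVENTSEL_USR (1ULL << 16)
#define SKETCH_EVENTSEL_OS  (1ULL << 17)

/* Simplified: compare only the low event-select and unit-mask bits. */
#define SKETCH_EVENT_MASK   0xffffULL

/* True if both configs agree on every bit covered by the mask. */
static bool sketch_event_match(uint64_t eventsel, uint64_t wanted)
{
	return !((eventsel ^ wanted) & SKETCH_EVENT_MASK);
}

/* True if the counter is counting at the given guest privilege level. */
static bool sketch_cpl_match(uint64_t eventsel, int cpl)
{
	return cpl == 0 ? !!(eventsel & SKETCH_EVENTSEL_OS)
			: !!(eventsel & SKETCH_EVENTSEL_USR);
}

int main(void)
{
	uint64_t eventsel = 0xc0 | SKETCH_EVENTSEL_USR;	/* insns, user only */

	printf("user-mode match: %d\n",
	       sketch_event_match(eventsel, 0xc0) &&
	       sketch_cpl_match(eventsel, 3));
	printf("kernel-mode match: %d\n",
	       sketch_event_match(eventsel, 0xc0) &&
	       sketch_cpl_match(eventsel, 0));
	return 0;
}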