Lines matching full:pmu (identifier search results: each entry gives the source line number, the matching line, and its enclosing function; declaration sites are tagged "local")

33 		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);  in kvm_pmu_event_mask()
51 struct kvm_pmu *pmu; in kvm_pmc_to_vcpu() local
55 pmu = container_of(pmc, struct kvm_pmu, pmc[0]); in kvm_pmc_to_vcpu()
56 vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu); in kvm_pmc_to_vcpu()
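The two container_of() steps above recover the vcpu from a bare counter pointer: step back from pmc[idx] to pmc[0], then out through the enclosing structures. A minimal userspace sketch of the same walk, using simplified stand-in types rather than the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kvm_pmc { int idx; };
    struct kvm_pmu { struct kvm_pmc pmc[32]; };
    struct kvm_vcpu_arch { struct kvm_pmu pmu; };

    int main(void)
    {
            static struct kvm_vcpu_arch arch;
            struct kvm_pmc *pmc = &arch.pmu.pmc[5];
            pmc->idx = 5;

            /* step back to pmc[0], then out to the enclosing structures,
             * mirroring kvm_pmc_to_vcpu() */
            pmc -= pmc->idx;
            struct kvm_pmu *pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
            struct kvm_vcpu_arch *va =
                    container_of(pmu, struct kvm_vcpu_arch, pmu);

            printf("recovered %p, expected %p\n", (void *)va, (void *)&arch);
            return 0;
    }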
62 * @pmc: The PMU counter pointer
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
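The idx >> 1 above works because ARMv8 chains an even-numbered counter with its odd neighbour, so one bit per pair is enough state. A plain-C sketch of that pair indexing, with a bare unsigned long standing in for the kernel's bitmap helpers:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long chained;                /* one bit per counter pair */

    static bool pmc_is_chained(int idx)
    {
            return chained & (1UL << (idx >> 1));   /* test_bit() stand-in */
    }

    int main(void)
    {
            chained |= 1UL << (4 >> 1);          /* mark pair {4,5} chained */
            printf("idx 4:%d idx 5:%d idx 6:%d\n",
                   pmc_is_chained(4), pmc_is_chained(5), pmc_is_chained(6));
            return 0;
    }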
82 * @pmc: The PMU counter pointer
124 * kvm_pmu_get_pair_counter_value - get PMU counter value
126 * @pmc: The PMU counter pointer
159 * kvm_pmu_get_counter_value - get PMU counter value
166 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_get_counter_value() local
167 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; in kvm_pmu_get_counter_value()
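For a chained pair, the two 32-bit counters are read back as a single 64-bit value and the requested half is then selected by index parity. A simplified sketch of that selection, leaving out the kernel's sysreg and perf-event plumbing:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t counter = ((uint64_t)0x1 << 32) | 0xdeadbeef;
            int select_idx = 5;                  /* odd index: the high half */

            uint32_t val = (select_idx & 1) ? (uint32_t)(counter >> 32)
                                            : (uint32_t)counter;
            printf("counter[%d] = 0x%x\n", select_idx, val);
            return 0;
    }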
181 * kvm_pmu_set_counter_value - set PMU counter value
200 * @pmc: The PMU counter pointer
213 * kvm_pmu_stop_counter - stop PMU counter
214 * @pmc: The PMU counter pointer
245 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
252 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init() local
255 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
259 * kvm_pmu_vcpu_reset - reset pmu state for cpu
266 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_reset() local
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
272 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); in kvm_pmu_vcpu_reset()
276 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
283 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_destroy() local
286 kvm_pmu_release_perf_event(&pmu->pmc[i]); in kvm_pmu_vcpu_destroy()
287 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
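The entries above trace the per-vcpu lifecycle: init stamps every counter with its own index (which is what makes the container_of walk sketched earlier possible), reset stops the counters and clears the chained bitmap, and destroy releases the perf events and flushes any pending irq_work. A compressed sketch with stand-in types:

    #include <string.h>
    #include <stdio.h>

    #define NR_COUNTERS 32                       /* ARMV8_PMU_MAX_COUNTERS */

    struct pmc { int idx; void *perf_event; };
    struct pmu { struct pmc pmc[NR_COUNTERS]; unsigned long chained; };

    static void pmu_vcpu_init(struct pmu *p)
    {
            for (int i = 0; i < NR_COUNTERS; i++)
                    p->pmc[i].idx = i;           /* as in kvm_pmu_vcpu_init() */
    }

    static void pmu_vcpu_reset(struct pmu *p)
    {
            for (int i = 0; i < NR_COUNTERS; i++)
                    p->pmc[i].perf_event = NULL; /* stop + release stand-in */
            p->chained = 0;                      /* bitmap_zero() stand-in */
    }

    int main(void)
    {
            struct pmu p;

            memset(&p, 0, sizeof(p));
            pmu_vcpu_init(&p);
            pmu_vcpu_reset(&p);
            printf("pmc[7].idx = %d\n", p.pmc[7].idx);
            return 0;
    }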
302 * kvm_pmu_enable_counter_mask - enable selected PMU counters
311 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_enable_counter_mask() local
321 pmc = &pmu->pmc[i]; in kvm_pmu_enable_counter_mask()
337 * kvm_pmu_disable_counter_mask - disable selected PMU counters
346 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_disable_counter_mask() local
356 pmc = &pmu->pmc[i]; in kvm_pmu_disable_counter_mask()
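Both the enable and disable paths walk the set bits of the guest-written mask and act on the matching pmc. A simplified loop standing in for the kernel's bit iteration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t val = (1ULL << 0) | (1ULL << 3) | (1ULL << 31);

            for (int i = 0; i < 32; i++) {
                    if (!(val & (1ULL << i)))
                            continue;
                    /* pmc = &pmu->pmc[i]; enable or disable it here */
                    printf("counter %d selected\n", i);
            }
            return 0;
    }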
384 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state() local
391 if (pmu->irq_level == overflow) in kvm_pmu_update_state()
394 pmu->irq_level = overflow; in kvm_pmu_update_state()
398 pmu->irq_num, overflow, pmu); in kvm_pmu_update_state()
405 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user() local
412 return pmu->irq_level != run_level; in kvm_pmu_should_notify_user()
416 * Reflect the PMU overflow interrupt output level into the kvm_run structure
424 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
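The cached irq_level makes the injection edge-triggered: the interrupt state is only touched when the overflow condition actually changes, and the same level is what gets mirrored into kvm_run for userspace irqchips. A sketch of that change detection, with simplified state:

    #include <stdbool.h>
    #include <stdio.h>

    struct pmu_state { bool irq_level; };

    static void pmu_update_state(struct pmu_state *p, bool overflow)
    {
            if (p->irq_level == overflow)
                    return;                      /* unchanged: no injection */
            p->irq_level = overflow;
            printf("set irq level %d\n", overflow);
    }

    int main(void)
    {
            struct pmu_state p = { false };

            pmu_update_state(&p, true);          /* injects */
            pmu_update_state(&p, true);          /* no-op */
            pmu_update_state(&p, false);         /* retracts */
            return 0;
    }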
429 * kvm_pmu_flush_hwstate - flush pmu state to cpu
432 * Check if the PMU has overflowed while we were running in the host, and inject
441 * kvm_pmu_sync_hwstate - sync pmu state from cpu
444 * Check if the PMU has overflowed while we were running in the guest, and
460 struct kvm_pmu *pmu; in kvm_pmu_perf_overflow_notify_vcpu() local
462 pmu = container_of(work, struct kvm_pmu, overflow_work); in kvm_pmu_perf_overflow_notify_vcpu()
463 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
476 struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu); in kvm_pmu_perf_overflow()
481 cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE); in kvm_pmu_perf_overflow()
504 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
507 cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD); in kvm_pmu_perf_overflow()
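The stop/start bracket around the handler freezes the count while the overflow is recorded, and the vcpu kick is pushed off to irq_work because the handler can run in a context where kicking directly is unsafe. A minimal sketch of that deferral pattern, with plain calls standing in for the kernel's irq_work machinery:

    #include <stdbool.h>
    #include <stdio.h>

    static bool kick_pending;

    static void overflow_handler(void)
    {
            /* NMI-like context: record the work instead of doing it */
            kick_pending = true;                 /* irq_work_queue() stand-in */
    }

    static void overflow_work(void)              /* runs later, safely */
    {
            if (kick_pending) {
                    kick_pending = false;
                    printf("kvm_vcpu_kick(vcpu)\n");
            }
    }

    int main(void)
    {
            overflow_handler();
            overflow_work();
            return 0;
    }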
517 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_software_increment() local
546 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { in kvm_pmu_software_increment()
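Software increment has to honour chaining: when the even (low) counter of a chained pair wraps at 32 bits, the carry propagates into the odd (high) counter. A simplified sketch of that carry:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lo = UINT32_MAX, hi = 0;    /* chained pair {2i, 2i+1} */

            lo++;                                /* SWINC on the low half */
            if (lo == 0)                         /* wrapped: carry upward */
                    hi++;
            printf("lo=%u hi=%u\n", lo, hi);
            return 0;
    }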
599 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_create_perf_event() local
610 pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]); in kvm_pmu_create_perf_event()
671 pr_err_once("kvm: pmu event creation failed %ld\n", in kvm_pmu_create_perf_event()
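kvm_pmu_get_canonical_pmc() exists because a chained pair is backed by a single perf event hung off the even counter, so accesses through the odd half get redirected down. A one-line sketch of that redirection:

    #include <stdio.h>

    static int canonical_idx(int idx, int chained)
    {
            return chained ? (idx & ~1) : idx;   /* even member owns the event */
    }

    int main(void)
    {
            printf("idx 5 chained   -> %d\n", canonical_idx(5, 1));
            printf("idx 5 unchained -> %d\n", canonical_idx(5, 0));
            return 0;
    }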
689 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_pmc_chained() local
690 struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc; in kvm_pmu_update_pmc_chained()
708 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
711 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
746 struct arm_pmu *pmu; in kvm_pmu_probe_pmuver() local
752 * count anything. But it allows us to probe some of the PMU in kvm_pmu_probe_pmuver()
770 pr_err_once("kvm: pmu event creation failed %ld\n", in kvm_pmu_probe_pmuver()
775 if (event->pmu) { in kvm_pmu_probe_pmuver()
776 pmu = to_arm_pmu(event->pmu); in kvm_pmu_probe_pmuver()
777 if (pmu->pmuver) in kvm_pmu_probe_pmuver()
778 pmuver = pmu->pmuver; in kvm_pmu_probe_pmuver()
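The probe creates a throwaway counting event purely to learn which arm_pmu the perf core binds it to, then reads the version off that driver. A structural sketch of the pattern, where the stub stands in for perf_event_create_kernel_counter() and the version value is an assumption:

    #include <stdio.h>

    struct backend { int pmuver; };
    static struct backend host_pmu = { 3 };      /* assumption: a PMUv3 host */

    static struct backend *create_probe_event(void)
    {
            /* created disabled/pinned in the kernel, so it never counts */
            return &host_pmu;
    }

    int main(void)
    {
            struct backend *pmu = create_probe_event();
            int pmuver = 0;

            if (pmu && pmu->pmuver)              /* mirrors the listing above */
                    pmuver = pmu->pmuver;
            /* the probe event would be released here */
            printf("probed pmuver = %d\n", pmuver);
            return 0;
    }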
821 * a physical PMU and CONFIG_PERF_EVENTS is selected. in kvm_arm_support_pmu_v3()
828 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
832 * A valid interrupt configuration for the PMU is either to have a in kvm_arm_pmu_v3_enable()
837 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
843 * the vgic will be initialized, so we can check the PMU irq in kvm_arm_pmu_v3_enable()
854 vcpu->arch.pmu.ready = true; in kvm_arm_pmu_v3_enable()
865 * If using the PMU with an in-kernel virtual GIC in kvm_arm_pmu_v3_init()
867 * initialized when initializing the PMU. in kvm_arm_pmu_v3_init()
875 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
876 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
881 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
884 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
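Initialisation claims the interrupt with the vgic before anything can fire, then sets up the overflow irq_work, and only then marks the PMU as created. A stubbed sketch of that ordering, with plain functions standing in for the kvm/vgic calls named above:

    #include <stdio.h>

    static int vgic_claim_irq(int irq)
    {
            printf("own irq %d\n", irq);
            return 0;                            /* kvm_vgic_set_owner() stand-in */
    }

    static void init_overflow_work(void)
    {
            printf("init overflow irq_work\n");
    }

    int main(void)
    {
            int irq = 23;                        /* hypothetical PPI number */

            if (vgic_claim_irq(irq))
                    return 1;
            init_overflow_work();
            printf("pmu.created = true\n");
            return 0;
    }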
903 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
906 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
920 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
940 /* The PMU overflow interrupt can be a PPI or a valid SPI. */ in kvm_arm_pmu_v3_set_attr()
950 kvm_debug("Set kvm ARM PMU irq: %d\n", irq); in kvm_arm_pmu_v3_set_attr()
951 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
1024 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()
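The PPI-or-SPI rule quoted above comes straight from GIC interrupt numbering: PPIs occupy IDs 16-31 and SPIs 32-1019. A sketch of that range check, simplified from the kernel's irq_is_ppi()/irq_is_spi() helpers:

    #include <stdbool.h>
    #include <stdio.h>

    static bool pmu_irq_number_ok(int irq)
    {
            bool ppi = irq >= 16 && irq <= 31;   /* per-cpu interrupt */
            bool spi = irq >= 32 && irq < 1020;  /* shared interrupt */
            return ppi || spi;
    }

    int main(void)
    {
            printf("irq 23:%d irq 100:%d irq 5:%d\n",
                   pmu_irq_number_ok(23), pmu_irq_number_ok(100),
                   pmu_irq_number_ok(5));
            return 0;
    }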