Lines Matching +full:interrupt +full:- +full:counter
1 // SPDX-License-Identifier: GPL-2.0-only
25 switch (kvm->arch.pmuver) { in kvm_pmu_event_mask()
33 WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); in kvm_pmu_event_mask()
39 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
41 * @select_idx: The counter index
54 pmc -= pmc->idx; in kvm_pmc_to_vcpu()
61 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
62 * @pmc: The PMU counter pointer
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
72 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
73 * @select_idx: The counter index
81 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
82 * @pmc: The PMU counter pointer
84 * When a pair of PMCs are chained together we use the low counter (canonical)
90 kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_canonical_pmc()
91 return pmc - 1; in kvm_pmu_get_canonical_pmc()
97 if (kvm_pmu_idx_is_high_counter(pmc->idx)) in kvm_pmu_get_alternate_pmc()
98 return pmc - 1; in kvm_pmu_get_alternate_pmc()
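The index arithmetic in kvm_pmu_get_canonical_pmc()/kvm_pmu_get_alternate_pmc() above is worth spelling out: counters are chained in even/odd pairs, the even (low) counter is treated as the canonical one, and the chained state is tracked with one bit per pair at idx >> 1. A minimal stand-alone C sketch of that arithmetic (helper names are illustrative, not kernel API):

    #include <assert.h>
    #include <stdbool.h>

    static bool idx_is_high(int idx)   { return idx & 1; }   /* odd index = high half  */
    static int  canonical_idx(int idx) { return idx & ~1; }  /* even index = low half  */
    static int  chain_bit(int idx)     { return idx >> 1; }  /* one chained bit per pair */

    int main(void)
    {
        assert(canonical_idx(5) == 4);             /* counter 5 pairs with counter 4 */
        assert(chain_bit(4) == chain_bit(5));      /* both halves share chained bit 2 */
        assert(!idx_is_high(4) && idx_is_high(5));
        return 0;
    }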
104 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
106 * @select_idx: The counter index
118 eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_idx_has_chain_evtype()
124 * kvm_pmu_get_pair_counter_value - get PMU counter value
126 * @pmc: The PMU counter pointer
131 u64 counter, counter_high, reg, enabled, running; in kvm_pmu_get_pair_counter_value() local
135 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
137 counter = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_get_pair_counter_value()
140 counter = lower_32_bits(counter) | (counter_high << 32); in kvm_pmu_get_pair_counter_value()
142 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_get_pair_counter_value()
143 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_get_pair_counter_value()
144 counter = __vcpu_sys_reg(vcpu, reg); in kvm_pmu_get_pair_counter_value()
148 * The real counter value is equal to the value of counter register plus in kvm_pmu_get_pair_counter_value()
151 if (pmc->perf_event) in kvm_pmu_get_pair_counter_value()
152 counter += perf_event_read_value(pmc->perf_event, &enabled, in kvm_pmu_get_pair_counter_value()
155 return counter; in kvm_pmu_get_pair_counter_value()
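To make the combination in kvm_pmu_get_pair_counter_value() above concrete: the saved low and high 32-bit halves are stitched into one 64-bit value, and whatever the still-running perf event has accumulated is added on top. A minimal userspace sketch with illustrative names (saved_low, saved_high and perf_delta are not kernel identifiers):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pair_counter_value(uint32_t saved_low, uint32_t saved_high,
                                       uint64_t perf_delta)
    {
        /* mirrors: counter = lower_32_bits(counter) | (counter_high << 32) */
        uint64_t counter = (uint64_t)saved_low | ((uint64_t)saved_high << 32);

        /* the backing perf event counts on top of the saved register value */
        return counter + perf_delta;
    }

    int main(void)
    {
        /* low half close to wrapping, high half at 1, 10 events still pending */
        printf("%llu\n", (unsigned long long)pair_counter_value(0xfffffff0u, 1, 10));
        return 0;
    }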
159 * kvm_pmu_get_counter_value - get PMU counter value
161 * @select_idx: The counter index
165 u64 counter; in kvm_pmu_get_counter_value() local
166 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_get_counter_value()
167 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; in kvm_pmu_get_counter_value()
169 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_get_counter_value()
173 counter = upper_32_bits(counter); in kvm_pmu_get_counter_value()
175 counter = lower_32_bits(counter); in kvm_pmu_get_counter_value()
177 return counter; in kvm_pmu_get_counter_value()
181 * kvm_pmu_set_counter_value - set PMU counter value
183 * @select_idx: The counter index
184 * @val: The counter value
192 __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx); in kvm_pmu_set_counter_value()
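The += in kvm_pmu_set_counter_value() above is easy to misread: instead of storing val directly, the code adds the signed difference between the requested value and the value the guest currently observes, so the contribution of the still-running perf event cancels out on the next read. A minimal sketch of that arithmetic (variable names are illustrative; the real code also truncates to the counter width):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t saved_reg  = 100;  /* value held in the emulated PMEVCNTRn */
        uint64_t perf_delta = 40;   /* events accumulated by the perf event */
        uint64_t observed   = saved_reg + perf_delta;  /* what the guest reads */
        uint64_t requested  = 500;  /* value the guest writes to the counter */

        /* add the signed delta rather than overwriting the register */
        saved_reg += (int64_t)requested - (int64_t)observed;

        /* a subsequent read still adds perf_delta, so the guest sees 500 */
        assert(saved_reg + perf_delta == requested);
        return 0;
    }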
199 * kvm_pmu_release_perf_event - remove the perf event
200 * @pmc: The PMU counter pointer
205 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
206 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
207 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
208 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
213 * kvm_pmu_stop_counter - stop PMU counter
214 * @pmc: The PMU counter pointer
216 * If this counter has been configured to monitor some event, release it here.
220 u64 counter, reg, val; in kvm_pmu_stop_counter() local
223 if (!pmc->perf_event) in kvm_pmu_stop_counter()
226 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_stop_counter()
228 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) { in kvm_pmu_stop_counter()
230 val = counter; in kvm_pmu_stop_counter()
232 reg = PMEVCNTR0_EL0 + pmc->idx; in kvm_pmu_stop_counter()
233 val = lower_32_bits(counter); in kvm_pmu_stop_counter()
239 __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter); in kvm_pmu_stop_counter()
245 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
252 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init()
255 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
259 * kvm_pmu_vcpu_reset - reset pmu state for cpu
266 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_reset()
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
272 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); in kvm_pmu_vcpu_reset()
276 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
283 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_destroy()
286 kvm_pmu_release_perf_event(&pmu->pmc[i]); in kvm_pmu_vcpu_destroy()
287 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
298 return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX); in kvm_pmu_valid_counter_mask()
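For reference, kvm_pmu_valid_counter_mask() above builds a bitmask of implemented counters: bits 0..N-1 for the event counters (N coming from PMCR_EL0.N) plus bit 31 for the cycle counter. A small stand-alone sketch of the same computation (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define CYCLE_IDX 31u   /* ARMV8_PMU_CYCLE_IDX in the kernel sources */

    /* GENMASK(n - 1, 0) | BIT(31): event counters 0..n-1 plus the cycle counter */
    static uint32_t valid_counter_mask(unsigned int n)
    {
        uint32_t events = n ? (((uint32_t)1 << n) - 1) : 0;

        return events | (1u << CYCLE_IDX);
    }

    int main(void)
    {
        /* 6 event counters -> bits 0..5 plus bit 31 */
        printf("0x%08x\n", valid_counter_mask(6));   /* 0x8000003f */
        return 0;
    }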
302 * kvm_pmu_enable_counter_mask - enable selected PMU counters
311 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_enable_counter_mask()
321 pmc = &pmu->pmc[i]; in kvm_pmu_enable_counter_mask()
328 if (pmc->perf_event) { in kvm_pmu_enable_counter_mask()
329 perf_event_enable(pmc->perf_event); in kvm_pmu_enable_counter_mask()
330 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) in kvm_pmu_enable_counter_mask()
337 * kvm_pmu_disable_counter_mask - disable selected PMU counters
346 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_disable_counter_mask()
356 pmc = &pmu->pmc[i]; in kvm_pmu_disable_counter_mask()
363 if (pmc->perf_event) in kvm_pmu_disable_counter_mask()
364 perf_event_disable(pmc->perf_event); in kvm_pmu_disable_counter_mask()
383 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state()
390 if (pmu->irq_level == overflow) in kvm_pmu_update_state()
393 pmu->irq_level = overflow; in kvm_pmu_update_state()
395 if (likely(irqchip_in_kernel(vcpu->kvm))) { in kvm_pmu_update_state()
396 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, in kvm_pmu_update_state()
397 pmu->irq_num, overflow, pmu); in kvm_pmu_update_state()
404 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user()
405 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_pmu_should_notify_user()
406 bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU; in kvm_pmu_should_notify_user()
408 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_pmu_should_notify_user()
411 return pmu->irq_level != run_level; in kvm_pmu_should_notify_user()
415 * Reflect the PMU overflow interrupt output level into the kvm_run structure
419 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_pmu_update_run()
422 regs->device_irq_level &= ~KVM_ARM_DEV_PMU; in kvm_pmu_update_run()
423 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
424 regs->device_irq_level |= KVM_ARM_DEV_PMU; in kvm_pmu_update_run()
428 * kvm_pmu_flush_hwstate - flush pmu state to cpu
432 * an interrupt if that was the case.
440 * kvm_pmu_sync_hwstate - sync pmu state from cpu
444 * inject an interrupt if that was the case.
452 * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
462 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
474 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_pmu_perf_overflow()
475 struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu); in kvm_pmu_perf_overflow()
477 int idx = pmc->idx; in kvm_pmu_perf_overflow()
480 cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE); in kvm_pmu_perf_overflow()
484 * i.e. the point where the counter overflows. in kvm_pmu_perf_overflow()
486 period = -(local64_read(&perf_event->count)); in kvm_pmu_perf_overflow()
488 if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_perf_overflow()
491 local64_set(&perf_event->hw.period_left, 0); in kvm_pmu_perf_overflow()
492 perf_event->attr.sample_period = period; in kvm_pmu_perf_overflow()
493 perf_event->hw.sample_period = period; in kvm_pmu_perf_overflow()
503 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
506 cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD); in kvm_pmu_perf_overflow()
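The period computation in kvm_pmu_perf_overflow() above is the core of the emulation trick: the backing perf event is reprogrammed to fire after exactly the number of events left before the virtual counter wraps, which is the negated counter value, truncated to 32 bits unless the counter is treated as 64-bit (e.g. the cycle counter with PMCR_EL0.LC set). A stand-alone sketch of that arithmetic (not kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* distance from the current count to the next wrap of a 32- or 64-bit counter */
    static uint64_t sample_period(uint64_t count, int is_64bit)
    {
        uint64_t period = -count;        /* two's complement: 2^64 - count */

        if (!is_64bit)
            period &= 0xffffffffull;     /* keep only the 32-bit distance */

        return period;
    }

    int main(void)
    {
        /* a 32-bit counter at 0xfffffff0 overflows after 0x10 more events */
        assert(sample_period(0xfffffff0ull, 0) == 0x10);

        /* a 64-bit counter uses the full 64-bit distance to the wrap */
        assert(sample_period(0xfffffff0ull, 1) == 0xffffffff00000010ull);
        return 0;
    }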
510 * kvm_pmu_software_increment - do software increment
516 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_software_increment()
533 type &= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_software_increment()
537 /* increment this even SW_INC counter */ in kvm_pmu_software_increment()
545 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { in kvm_pmu_software_increment()
546 /* increment the high counter */ in kvm_pmu_software_increment()
550 if (!reg) /* mark overflow on the high counter */ in kvm_pmu_software_increment()
553 /* mark overflow on low counter */ in kvm_pmu_software_increment()
560 * kvm_pmu_handle_pmcr - handle PMCR register
594 * kvm_pmu_create_perf_event - create a perf event for a counter
596 * @select_idx: The number of selected counter
600 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_create_perf_event()
604 u64 eventsel, counter, reg, data; in kvm_pmu_create_perf_event() local
608 * obtained from the low/even counter. We also use this counter to in kvm_pmu_create_perf_event()
611 pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]); in kvm_pmu_create_perf_event()
613 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
614 ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx; in kvm_pmu_create_perf_event()
618 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
621 eventsel = data & kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_create_perf_event()
631 if (vcpu->kvm->arch.pmu_filter && in kvm_pmu_create_perf_event()
632 !test_bit(eventsel, vcpu->kvm->arch.pmu_filter)) in kvm_pmu_create_perf_event()
639 attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); in kvm_pmu_create_perf_event()
646 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); in kvm_pmu_create_perf_event()
652 * high counter. in kvm_pmu_create_perf_event()
654 attr.sample_period = (-counter) & GENMASK(63, 0); in kvm_pmu_create_perf_event()
657 event = perf_event_create_kernel_counter(&attr, -1, current, in kvm_pmu_create_perf_event()
662 if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx)) in kvm_pmu_create_perf_event()
663 attr.sample_period = (-counter) & GENMASK(63, 0); in kvm_pmu_create_perf_event()
665 attr.sample_period = (-counter) & GENMASK(31, 0); in kvm_pmu_create_perf_event()
667 event = perf_event_create_kernel_counter(&attr, -1, current, in kvm_pmu_create_perf_event()
677 pmc->perf_event = event; in kvm_pmu_create_perf_event()
681 * kvm_pmu_update_pmc_chained - update chained bitmap
683 * @select_idx: The number of selected counter
690 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_pmc_chained()
691 struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc; in kvm_pmu_update_pmc_chained()
695 new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) && in kvm_pmu_update_pmc_chained()
696 kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1); in kvm_pmu_update_pmc_chained()
706 * the adjacent counter is stopped and its event destroyed in kvm_pmu_update_pmc_chained()
709 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
712 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
716 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
719 * @select_idx: The number of selected counter
732 mask |= kvm_pmu_event_mask(vcpu->kvm); in kvm_pmu_set_counter_event_type()
745 if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF && in kvm_host_pmu_init()
774 event = perf_event_create_kernel_counter(&attr, -1, current, in kvm_pmu_probe_pmuver()
783 if (event->pmu) { in kvm_pmu_probe_pmuver()
784 pmu = to_arm_pmu(event->pmu); in kvm_pmu_probe_pmuver()
785 if (pmu->pmuver) in kvm_pmu_probe_pmuver()
786 pmuver = pmu->pmuver; in kvm_pmu_probe_pmuver()
797 unsigned long *bmap = vcpu->kvm->arch.pmu_filter; in kvm_pmu_get_pmceid()
810 if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4) in kvm_pmu_get_pmceid()
811 val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); in kvm_pmu_get_pmceid()
818 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_pmu_get_pmceid()
839 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
840 return -EINVAL; in kvm_arm_pmu_v3_enable()
843 * A valid interrupt configuration for the PMU is either to have a in kvm_arm_pmu_v3_enable()
844 * properly configured interrupt number and using an in-kernel in kvm_arm_pmu_v3_enable()
845 * irqchip, or to not have an in-kernel GIC and not set an IRQ. in kvm_arm_pmu_v3_enable()
847 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_enable()
848 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
850 * If we are using an in-kernel vgic, at this point we know in kvm_arm_pmu_v3_enable()
855 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq)) in kvm_arm_pmu_v3_enable()
856 return -EINVAL; in kvm_arm_pmu_v3_enable()
858 return -EINVAL; in kvm_arm_pmu_v3_enable()
861 /* One-off reload of the PMU on first run */ in kvm_arm_pmu_v3_enable()
869 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arm_pmu_v3_init()
873 * If using the PMU with an in-kernel virtual GIC in kvm_arm_pmu_v3_init()
877 if (!vgic_initialized(vcpu->kvm)) in kvm_arm_pmu_v3_init()
878 return -ENODEV; in kvm_arm_pmu_v3_init()
881 return -ENXIO; in kvm_arm_pmu_v3_init()
883 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
884 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
889 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
892 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
897 * For one VM the interrupt type must be same for each vcpu.
898 * As a PPI, the interrupt number is the same for all vcpus,
911 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
914 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
925 return -ENODEV; in kvm_arm_pmu_v3_set_attr()
927 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
928 return -EBUSY; in kvm_arm_pmu_v3_set_attr()
930 if (!vcpu->kvm->arch.pmuver) in kvm_arm_pmu_v3_set_attr()
931 vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver(); in kvm_arm_pmu_v3_set_attr()
933 if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) in kvm_arm_pmu_v3_set_attr()
934 return -ENODEV; in kvm_arm_pmu_v3_set_attr()
936 switch (attr->attr) { in kvm_arm_pmu_v3_set_attr()
938 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
941 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_set_attr()
942 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
945 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
947 /* The PMU overflow interrupt can be a PPI or a valid SPI. */ in kvm_arm_pmu_v3_set_attr()
949 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
951 if (!pmu_irq_is_valid(vcpu->kvm, irq)) in kvm_arm_pmu_v3_set_attr()
952 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
955 return -EBUSY; in kvm_arm_pmu_v3_set_attr()
958 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
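As context for the check above ("The PMU overflow interrupt can be a PPI or a valid SPI"): GIC interrupt IDs 0-15 are SGIs, 16-31 are PPIs (per-CPU), and 32 up to the GIC's SPI limit are SPIs, so only the latter two ranges are acceptable here. A stand-alone sketch of that range check (helper names are illustrative, not the kernel macros; the upper SPI bound is left to the later vgic validation):

    #include <assert.h>
    #include <stdbool.h>

    /* GIC interrupt ID ranges: 0-15 SGI, 16-31 PPI, 32+ SPI */
    static bool is_ppi(int irq) { return irq >= 16 && irq <= 31; }
    static bool is_spi(int irq) { return irq >= 32; }   /* upper bound checked elsewhere */

    static bool pmu_irq_in_valid_range(int irq)
    {
        return is_ppi(irq) || is_spi(irq);
    }

    int main(void)
    {
        assert(pmu_irq_in_valid_range(23));    /* a PPI, a common choice for the PMU */
        assert(pmu_irq_in_valid_range(100));   /* an SPI is also acceptable */
        assert(!pmu_irq_in_valid_range(7));    /* SGIs are rejected */
        return 0;
    }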
966 nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; in kvm_arm_pmu_v3_set_attr()
968 uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr; in kvm_arm_pmu_v3_set_attr()
971 return -EFAULT; in kvm_arm_pmu_v3_set_attr()
976 return -EINVAL; in kvm_arm_pmu_v3_set_attr()
978 mutex_lock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
980 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
981 vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL); in kvm_arm_pmu_v3_set_attr()
982 if (!vcpu->kvm->arch.pmu_filter) { in kvm_arm_pmu_v3_set_attr()
983 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
984 return -ENOMEM; in kvm_arm_pmu_v3_set_attr()
994 bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
996 bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events); in kvm_arm_pmu_v3_set_attr()
1000 bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
1002 bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); in kvm_arm_pmu_v3_set_attr()
1004 mutex_unlock(&vcpu->kvm->lock); in kvm_arm_pmu_v3_set_attr()
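The filter handling above is easier to follow with a tiny worked example: the action of the first filter applied decides the default polarity (a first ALLOW filter means "deny everything else", a first DENY filter means "allow everything else"), and each subsequent filter sets or clears a range of event numbers on top of that. A plain C sketch emulating the bitmap with a bool array (EVENT_ALLOW/EVENT_DENY stand in for the UAPI constants):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_EVENTS 16
    enum { EVENT_ALLOW, EVENT_DENY };   /* stand-ins for the filter actions */

    static bool allowed[NR_EVENTS];
    static bool initialised;

    static void apply_filter(int action, int base, int n)
    {
        if (!initialised) {
            /* first ALLOW filter => default deny; first DENY filter => default allow */
            bool def = (action != EVENT_ALLOW);
            for (int i = 0; i < NR_EVENTS; i++)
                allowed[i] = def;
            initialised = true;
        }
        for (int i = base; i < base + n && i < NR_EVENTS; i++)
            allowed[i] = (action == EVENT_ALLOW);
    }

    int main(void)
    {
        apply_filter(EVENT_ALLOW, 2, 3);   /* only events 2..4 allowed so far */
        apply_filter(EVENT_DENY, 3, 1);    /* then carve event 3 back out */

        for (int i = 0; i < NR_EVENTS; i++)
            if (allowed[i])
                printf("event %d allowed\n", i);   /* prints 2 and 4 */
        return 0;
    }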
1012 return -ENXIO; in kvm_arm_pmu_v3_set_attr()
1017 switch (attr->attr) { in kvm_arm_pmu_v3_get_attr()
1019 int __user *uaddr = (int __user *)(long)attr->addr; in kvm_arm_pmu_v3_get_attr()
1022 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_pmu_v3_get_attr()
1023 return -EINVAL; in kvm_arm_pmu_v3_get_attr()
1026 return -ENODEV; in kvm_arm_pmu_v3_get_attr()
1029 return -ENXIO; in kvm_arm_pmu_v3_get_attr()
1031 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()
1036 return -ENXIO; in kvm_arm_pmu_v3_get_attr()
1041 switch (attr->attr) { in kvm_arm_pmu_v3_has_attr()
1049 return -ENXIO; in kvm_arm_pmu_v3_has_attr()