Searched refs:x86_pmu (Results 1 – 12 of 12) sorted by relevance
162  if (pmi && x86_pmu.version >= 4)  in __intel_pmu_lbr_enable()
170  lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;  in __intel_pmu_lbr_enable()
201  for (i = 0; i < x86_pmu.lbr_nr; i++)  in intel_pmu_lbr_reset_32()
202  wrmsrl(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_32()
209  for (i = 0; i < x86_pmu.lbr_nr; i++) {  in intel_pmu_lbr_reset_64()
210  wrmsrl(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_64()
211  wrmsrl(x86_pmu.lbr_to + i, 0);  in intel_pmu_lbr_reset_64()
212  if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)  in intel_pmu_lbr_reset_64()
221  if (!x86_pmu.lbr_nr)  in intel_pmu_lbr_reset()
224  if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)  in intel_pmu_lbr_reset()
[all …]
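The hits above (apparently the Intel LBR code, arch/x86/events/intel/lbr.c in current trees) share one pattern: loop over x86_pmu.lbr_nr and write the from/to MSR pair for each Last Branch Record slot. A minimal compilable userspace sketch of that reset loop follows; the struct is a toy reduction of the real one, wrmsrl() is stubbed, and the MSR base values are illustrative only.

#include <stdio.h>
#include <stdint.h>

struct toy_pmu {
	unsigned int lbr_nr;   /* number of LBR slots on this CPU model */
	unsigned int lbr_from; /* MSR base for "branch from" addresses  */
	unsigned int lbr_to;   /* MSR base for "branch to" addresses    */
};

/* Stub: in the kernel, wrmsrl() writes a 64-bit model-specific register. */
static void wrmsrl(unsigned int msr, uint64_t val)
{
	printf("wrmsr 0x%x <- 0x%016llx\n", msr, (unsigned long long)val);
}

/* Clear every from/to pair, mirroring intel_pmu_lbr_reset_64(). */
static void lbr_reset(const struct toy_pmu *pmu)
{
	for (unsigned int i = 0; i < pmu->lbr_nr; i++) {
		wrmsrl(pmu->lbr_from + i, 0);
		wrmsrl(pmu->lbr_to + i, 0);
	}
}

int main(void)
{
	struct toy_pmu pmu = { .lbr_nr = 4, .lbr_from = 0x680, .lbr_to = 0x6c0 };
	lbr_reset(&pmu);
	return 0;
}

Because lbr_nr, lbr_from, and lbr_to are filled in per microarchitecture at init time, the reset loop itself can stay generic.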
1899  x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);  in __intel_pmu_enable_all()
2092  if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)  in intel_pmu_enable_fixed()
2171  if (!x86_pmu.num_counters)  in intel_pmu_reset()
2178  for (idx = 0; idx < x86_pmu.num_counters; idx++) {  in intel_pmu_reset()
2182  for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)  in intel_pmu_reset()
2189  if (x86_pmu.version >= 2) {  in intel_pmu_reset()
2195  if (x86_pmu.lbr_nr) {  in intel_pmu_reset()
2227  if (!x86_pmu.late_ack)  in intel_pmu_handle_irq()
2285  if (x86_pmu.flags & PMU_FL_PEBS_ALL)  in intel_pmu_handle_irq()
2295  x86_pmu.drain_pebs(regs);  in intel_pmu_handle_irq()
[all …]
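The line-1899 hit is worth unpacking: __intel_pmu_enable_all() clears the guest-owned counters out of the global-control value before writing it, so the host and a KVM guest never enable the same counter at once. A small self-contained sketch of that masking; the bit layout below is illustrative (4 general-purpose enable bits plus 3 fixed-counter bits), not authoritative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t intel_ctrl            = 0x000000070000000fULL; /* all counters */
	uint64_t intel_ctrl_guest_mask = 0x0000000000000003ULL; /* ctr0/1 -> guest */

	/* Host enables everything except what the guest currently owns. */
	uint64_t host_enable = intel_ctrl & ~intel_ctrl_guest_mask;
	printf("GLOBAL_CTRL on host enable: 0x%016llx\n",
	       (unsigned long long)host_enable);
	return 0;
}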
180  if (x86_pmu.pebs_no_tlb) {  in load_latency_data()
338  size_t bsiz = x86_pmu.pebs_buffer_size;  in alloc_pebs_buffer()
342  if (!x86_pmu.pebs)  in alloc_pebs_buffer()
353  if (x86_pmu.intel_cap.pebs_format < 2) {  in alloc_pebs_buffer()
367  max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);  in alloc_pebs_buffer()
377  if (!x86_pmu.pebs)  in release_pebs_buffer()
385  ds_clear_cea(cea, x86_pmu.pebs_buffer_size);  in release_pebs_buffer()
386  dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);  in release_pebs_buffer()
397  if (!x86_pmu.bts)  in alloc_bts_buffer()
424  if (!x86_pmu.bts)  in release_bts_buffer()
[all …]
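The line-367 hit is the interesting arithmetic: alloc_pebs_buffer() rounds the buffer down to a whole number of PEBS records, relying on integer division truncating. A standalone worked example; the sizes are made up, since the real pebs_record_size depends on pebs_format.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t pebs_buffer_size = 65536; /* example: 64 KiB DS buffer */
	size_t pebs_record_size = 192;   /* example: one PEBS record  */

	/* max = record_size * (buffer_size / record_size): the division
	 * truncates, so any tail too small for a full record is unused. */
	size_t max = pebs_record_size * (pebs_buffer_size / pebs_record_size);
	printf("usable: %zu bytes (%zu records, %zu bytes left over)\n",
	       max, max / pebs_record_size, pebs_buffer_size - max);
	return 0;
}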
201  static __initconst const struct x86_pmu p6_pmu = {
242  x86_pmu.attr_rdpmc_broken = 1;  in p6_pmu_rdpmc_quirk()
243  x86_pmu.attr_rdpmc = 0;  in p6_pmu_rdpmc_quirk()
249  x86_pmu = p6_pmu;  in p6_pmu_init()
290  static const struct x86_pmu knc_pmu __initconst = {
316  x86_pmu = knc_pmu;  in knc_pmu_init()
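The p6 and knc hits show how a model-specific PMU gets selected: each microarchitecture defines a const struct x86_pmu, and its init function copies the whole thing into the single global (lines 249 and 316 above). A compact sketch of that copy-then-dispatch style; the two-field struct here is a toy, not the real layout.

#include <stdio.h>

struct toy_x86_pmu {
	const char *name;
	int (*handle_irq)(void);
};

static int p6_handle_irq(void)  { puts("p6 irq");  return 1; }
static int knc_handle_irq(void) { puts("knc irq"); return 1; }

static const struct toy_x86_pmu p6_pmu  = { "p6",  p6_handle_irq  };
static const struct toy_x86_pmu knc_pmu = { "knc", knc_handle_irq };

static struct toy_x86_pmu x86_pmu; /* the one global, as in core.c */

int main(void)
{
	x86_pmu = knc_pmu;    /* what knc_pmu_init() does */
	x86_pmu.handle_irq(); /* later callers dispatch through the global */
	return 0;
}

This same global is what xen_pmu_irq_handler() calls through in the final result below.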
921  for (idx = 0; idx < x86_pmu.num_counters; idx++) {  in p4_pmu_disable_all()
990  for (idx = 0; idx < x86_pmu.num_counters; idx++) {  in p4_pmu_enable_all()
1009  for (idx = 0; idx < x86_pmu.num_counters; idx++) {  in p4_pmu_handle_irq()
1028  if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))  in p4_pmu_handle_irq()
1301  static __initconst const struct x86_pmu p4_pmu = {
1359  x86_pmu = p4_pmu;  in p4_pmu_init()
1370  for (i = 0; i < x86_pmu.num_counters; i++) {  in p4_pmu_init()
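The line-1028 hit is the subtle one: perf programs counters to count up from a negative start value, so while the high bit (bit cntval_bits - 1) is still set the counter has not yet crossed zero, and a clear high bit means it wrapped. A minimal version of that check, assuming the P4's 40-bit counter width.

#include <stdio.h>
#include <stdint.h>

/* Top bit clear => the counter crossed zero, i.e. it overflowed. */
static int counter_overflowed(uint64_t val, int cntval_bits)
{
	return !(val & (1ULL << (cntval_bits - 1)));
}

int main(void)
{
	printf("%d\n", counter_overflowed(0xff00000000ULL, 40)); /* 0: counting */
	printf("%d\n", counter_overflowed(0x0000000005ULL, 40)); /* 1: wrapped  */
	return 0;
}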
582 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) in bts_init()
46   struct x86_pmu x86_pmu __read_mostly;
71   int shift = 64 - x86_pmu.cntval_bits;  in x86_perf_event_update()
121  if (!x86_pmu.extra_regs)  in x86_pmu_extra_regs()
124  for (er = x86_pmu.extra_regs; er->msr; er++) {  in x86_pmu_extra_regs()
151  for (i = 0; i < x86_pmu.num_counters; i++) {  in reserve_pmc_hardware()
156  for (i = 0; i < x86_pmu.num_counters; i++) {  in reserve_pmc_hardware()
167  i = x86_pmu.num_counters;  in reserve_pmc_hardware()
180  for (i = 0; i < x86_pmu.num_counters; i++) {  in release_pmc_hardware()
204  for (i = 0; i < x86_pmu.num_counters; i++) {  in check_hw_exists()
218  if (x86_pmu.num_counters_fixed) {  in check_hw_exists()
[all …]
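The line-71 hit, shift = 64 - x86_pmu.cntval_bits, is the heart of x86_perf_event_update(): shifting a raw counter value left by that amount and arithmetic-shifting back sign-extends it to 64 bits, so the delta between two reads stays correct even across a counter wrap. A worked standalone example with a 48-bit counter (a typical cntval_bits on modern x86).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int cntval_bits = 48;
	int shift = 64 - cntval_bits; /* 16 */

	uint64_t prev_raw = 0xfffffffffff0ULL; /* just before the 48-bit wrap */
	uint64_t new_raw  = 0x000000000010ULL; /* just after the wrap         */

	/* Shift both into the top bits, subtract, shift back: the
	 * arithmetic right shift sign-extends, so the wrap cancels out. */
	int64_t delta = ((int64_t)(new_raw << shift) -
			 (int64_t)(prev_raw << shift)) >> shift;
	printf("delta = %lld\n", (long long)delta); /* prints delta = 32 */
	return 0;
}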
513  struct x86_pmu {  struct
665  __quirk.next = x86_pmu.quirks; \  argument
666  x86_pmu.quirks = &__quirk; \
703  extern struct x86_pmu x86_pmu __read_mostly;
707  return x86_pmu.lbr_sel_map &&  in x86_pmu_has_lbr_callstack()
708  x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;  in x86_pmu_has_lbr_callstack()
738  return x86_pmu.eventsel + (x86_pmu.addr_offset ?  in x86_pmu_config_addr()
739  x86_pmu.addr_offset(index, true) : index);  in x86_pmu_config_addr()
744  return x86_pmu.perfctr + (x86_pmu.addr_offset ?  in x86_pmu_event_addr()
745  x86_pmu.addr_offset(index, false) : index);  in x86_pmu_event_addr()
[all …]
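Lines 665–666 come from the quirk-registration macro in this header: a node is pushed onto an intrusive singly linked list headed at x86_pmu.quirks, and init code later walks the list calling each entry. A self-contained sketch of that list-push pattern; the names mirror the kernel's, but the structs here are toys.

#include <stdio.h>

struct quirk {
	void (*func)(void);
	struct quirk *next;
};

static struct quirk *quirks; /* stands in for x86_pmu.quirks */

static void rdpmc_quirk(void) { puts("applying rdpmc quirk"); }

static struct quirk q1 = { .func = rdpmc_quirk };

static void add_quirk(struct quirk *q)
{
	q->next = quirks; /* __quirk.next = x86_pmu.quirks; */
	quirks = q;       /* x86_pmu.quirks = &__quirk;     */
}

int main(void)
{
	add_quirk(&q1);
	for (struct quirk *q = quirks; q; q = q->next)
		q->func(); /* what the PMU init path does with real quirks */
	return 0;
}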
249  for (i = 0; i < x86_pmu.num_counters; i++) {  in __amd_put_nb_event_constraints()
316  for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {  in __amd_get_nb_event_constraints()
359  for (i = 0; i < x86_pmu.num_counters; i++) {  in amd_alloc_nb()
372  if (!x86_pmu.amd_nb_constraints)  in amd_pmu_cpu_prepare()
391  if (!x86_pmu.amd_nb_constraints)  in amd_pmu_cpu_starting()
417  if (!x86_pmu.amd_nb_constraints)  in amd_pmu_cpu_dead()
622  static __initconst const struct x86_pmu amd_pmu = {
663  x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;  in amd_core_pmu_init()
682  x86_pmu.eventsel = MSR_F15H_PERF_CTL;  in amd_core_pmu_init()
683  x86_pmu.perfctr = MSR_F15H_PERF_CTR;  in amd_core_pmu_init()
[all …]
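The line-316 hit uses for_each_set_bit() so that only the counters a constraint's idxmsk permits are tried. An equivalent open-coded loop over a plain bitmap; the mask value and counter count are arbitrary examples.

#include <stdio.h>

int main(void)
{
	unsigned long idxmsk = 0x2d; /* counters 0, 2, 3, 5 permitted */
	int num_counters = 6;

	/* Skip clear bits, visit set ones: what for_each_set_bit() does. */
	for (int idx = 0; idx < num_counters; idx++) {
		if (!(idxmsk & (1UL << idx)))
			continue;
		printf("trying counter %d\n", idx);
	}
	return 0;
}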
494  if (x86_pmu.handle_irq(&regs))  in xen_pmu_irq_handler()
83 hwc->sample_period = x86_pmu.max_period;