Searched refs:x86_pmu (Results 1 – 14 of 14) sorted by relevance
/Linux-v5.4/arch/x86/events/intel/
  lbr.c
    162  if (pmi && x86_pmu.version >= 4) in __intel_pmu_lbr_enable()
    170  lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask; in __intel_pmu_lbr_enable()
    201  for (i = 0; i < x86_pmu.lbr_nr; i++) in intel_pmu_lbr_reset_32()
    202  wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_32()
    209  for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_reset_64()
    210  wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_64()
    211  wrmsrl(x86_pmu.lbr_to + i, 0); in intel_pmu_lbr_reset_64()
    212  if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) in intel_pmu_lbr_reset_64()
    221  if (!x86_pmu.lbr_nr) in intel_pmu_lbr_reset()
    224  if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) in intel_pmu_lbr_reset()
    [all …]
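The lbr.c hits are the LBR housekeeping paths: each of the x86_pmu.lbr_nr last-branch-record slots is a FROM/TO MSR pair based at x86_pmu.lbr_from and x86_pmu.lbr_to. A minimal sketch of the 64-bit reset loop at lines 209-212 (the real function additionally clears a per-entry INFO MSR when lbr_format is LBR_FORMAT_INFO):

    /* Sketch reconstructed from the excerpt above; assumes normal
     * kernel context: wrmsrl() from <asm/msr.h>, the global x86_pmu. */
    static void lbr_reset_64_sketch(void)
    {
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
            wrmsrl(x86_pmu.lbr_from + i, 0);  /* branch source, slot i */
            wrmsrl(x86_pmu.lbr_to + i, 0);    /* branch target, slot i */
        }
    }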
  core.c
    1974  x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); in __intel_pmu_enable_all()
    2214  if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) in intel_pmu_enable_fixed()
    2220  if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { in intel_pmu_enable_fixed()
    2298  if (!x86_pmu.num_counters) in intel_pmu_reset()
    2305  for (idx = 0; idx < x86_pmu.num_counters; idx++) { in intel_pmu_reset()
    2309  for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) in intel_pmu_reset()
    2316  if (x86_pmu.version >= 2) { in intel_pmu_reset()
    2322  if (x86_pmu.lbr_nr) { in intel_pmu_reset()
    2368  if (x86_pmu.flags & PMU_FL_PEBS_ALL) in handle_pmi_common()
    2378  x86_pmu.drain_pebs(regs); in handle_pmi_common()
    [all …]
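intel_pmu_reset() (lines 2298-2322) walks every counter bank the active PMU advertises: x86_pmu.num_counters general-purpose counters, x86_pmu.num_counters_fixed fixed counters, then the status MSRs (version >= 2) and the LBR stack (lbr_nr). A hedged sketch of the shape of that walk, using the accessors from perf_event.h; the real function uses wrmsrl_safe() and also clears the overflow-status MSRs:

    /* Sketch, not the verbatim function: zero every counter the
     * PMU describes about itself.  x86_pmu_config_addr() /
     * x86_pmu_event_addr() map an index to its MSR (see the
     * perf_event.h entry below). */
    static void pmu_reset_sketch(void)
    {
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
            wrmsrl(x86_pmu_config_addr(idx), 0);  /* event select */
            wrmsrl(x86_pmu_event_addr(idx), 0);   /* counter value */
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
            wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0);
    }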
  ds.c
    180  if (x86_pmu.pebs_no_tlb) { in load_latency_data()
    338  size_t bsiz = x86_pmu.pebs_buffer_size; in alloc_pebs_buffer()
    342  if (!x86_pmu.pebs) in alloc_pebs_buffer()
    353  if (x86_pmu.intel_cap.pebs_format < 2) { in alloc_pebs_buffer()
    367  max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); in alloc_pebs_buffer()
    377  if (!x86_pmu.pebs) in release_pebs_buffer()
    385  ds_clear_cea(cea, x86_pmu.pebs_buffer_size); in release_pebs_buffer()
    386  dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); in release_pebs_buffer()
    397  if (!x86_pmu.bts) in alloc_bts_buffer()
    424  if (!x86_pmu.bts) in release_bts_buffer()
    [all …]
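The interesting arithmetic is at line 367: the PEBS buffer is capped to a whole number of records so the interrupt threshold never lands mid-record. With integer division this is a plain round-down:

    /* Round the buffer down to a multiple of the record size, as at
     * ds.c:367.  The example numbers are illustrative only: a 64 KiB
     * buffer with 192-byte records keeps 341 records = 65472 bytes. */
    static size_t pebs_usable_bytes(size_t bsiz, size_t record_size)
    {
        return record_size * (bsiz / record_size);  /* truncating division */
    }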
  p6.c
    201  static __initconst const struct x86_pmu p6_pmu = {
    242  x86_pmu.attr_rdpmc_broken = 1; in p6_pmu_rdpmc_quirk()
    243  x86_pmu.attr_rdpmc = 0; in p6_pmu_rdpmc_quirk()
    249  x86_pmu = p6_pmu; in p6_pmu_init()
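p6.c shows the registration idiom shared by every driver in this listing: a const __initconst template carries the model-specific callbacks and limits, the vendor init copies it wholesale into the global x86_pmu (line 249), and quirks then patch individual fields, as the rdpmc quirk does at lines 242-243. Schematically, with placeholder values:

    /* Registration pattern, sketched.  Field names come from
     * struct x86_pmu in arch/x86/events/perf_event.h; the values
     * below are placeholders, not hardware facts. */
    static __initconst const struct x86_pmu sketch_pmu = {
        .name         = "sketch",
        .num_counters = 2,
        .cntval_bits  = 32,
        /* ... handlers, event maps, constraints ... */
    };

    static __init int sketch_pmu_init(void)
    {
        x86_pmu = sketch_pmu;   /* struct copy installs it as the active PMU */
        return 0;
    }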
  knc.c
    290  static const struct x86_pmu knc_pmu __initconst = {
    316  x86_pmu = knc_pmu; in knc_pmu_init()
  p4.c
    921  for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_disable_all()
    990  for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_enable_all()
    1009  for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_handle_irq()
    1028  if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) in p4_pmu_handle_irq()
    1301  static __initconst const struct x86_pmu p4_pmu = {
    1359  x86_pmu = p4_pmu; in p4_pmu_init()
    1370  for (i = 0; i < x86_pmu.num_counters; i++) { in p4_pmu_init()
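Line 1028 is how p4.c double-checks overflow: perf programs a counter with the negated sample period, so bit cntval_bits-1 stays set while the counter is still climbing and reads clear once it wraps past zero. The same test appears in the AMD driver below (core.c line 581). As a sketch:

    /* Overflow test sketched from p4.c:1028 and amd/core.c:581;
     * assumes kernel types (u64, bool) from <linux/types.h>.
     * A counter loaded with -(period) keeps its top implemented
     * bit set until it overflows. */
    static bool counter_overflowed(u64 val)
    {
        return !(val & (1ULL << (x86_pmu.cntval_bits - 1)));
    }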
  bts.c
    576  if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) in bts_init()
/Linux-v5.4/arch/x86/events/
  core.c
    46   struct x86_pmu x86_pmu __read_mostly;
    71   int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
    121  if (!x86_pmu.extra_regs) in x86_pmu_extra_regs()
    124  for (er = x86_pmu.extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
    151  for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
    156  for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
    167  i = x86_pmu.num_counters; in reserve_pmc_hardware()
    180  for (i = 0; i < x86_pmu.num_counters; i++) { in release_pmc_hardware()
    204  for (i = 0; i < x86_pmu.num_counters; i++) { in check_hw_exists()
    218  if (x86_pmu.num_counters_fixed) { in check_hw_exists()
    [all …]
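Line 46 is the single global instance everything above mutates, and line 71 is the core of x86_perf_event_update(): hardware counters are only cntval_bits wide (48 on recent Intel parts), so deltas are computed by shifting the raw values up to bit 63 and arithmetic-shifting back down, which discards the unimplemented high bits and sign-extends the rest. Sketched:

    /* Delta computation sketched from core.c:71.  With
     * cntval_bits == 48, shift == 16. */
    static s64 counter_delta(u64 prev_raw, u64 new_raw)
    {
        int shift = 64 - x86_pmu.cntval_bits;
        s64 delta;

        delta = (new_raw << shift) - (prev_raw << shift);
        delta >>= shift;    /* arithmetic shift: sign-extends */
        return delta;
    }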
  perf_event.h
    565  struct x86_pmu { (struct)
    725  __quirk.next = x86_pmu.quirks; \ (argument)
    726  x86_pmu.quirks = &__quirk; \
    765  extern struct x86_pmu x86_pmu __read_mostly;
    769  return x86_pmu.lbr_sel_map && in x86_pmu_has_lbr_callstack()
    770  x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0; in x86_pmu_has_lbr_callstack()
    800  return x86_pmu.eventsel + (x86_pmu.addr_offset ? in x86_pmu_config_addr()
    801  x86_pmu.addr_offset(index, true) : index); in x86_pmu_config_addr()
    806  return x86_pmu.perfctr + (x86_pmu.addr_offset ? in x86_pmu_event_addr()
    807  x86_pmu.addr_offset(index, false) : index); in x86_pmu_event_addr()
    [all …]
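perf_event.h is the home of the listing: struct x86_pmu itself (line 565), the extern declaration of the global instance (line 765), and the MSR-address accessors at lines 800-807. When a driver supplies no addr_offset hook, the config and counter MSRs are simply contiguous from x86_pmu.eventsel and x86_pmu.perfctr. A worked example, assuming the architectural perfmon bases (MSR_ARCH_PERFMON_EVENTSEL0 = 0x186, MSR_ARCH_PERFMON_PERFCTR0 = 0xc1):

    /* With addr_offset == NULL, the accessors reduce to base + index. */
    unsigned int config_msr = 0x186 + 2;  /* x86_pmu_config_addr(2) == 0x188 */
    unsigned int event_msr  = 0xc1 + 2;   /* x86_pmu_event_addr(2)  == 0xc3  */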
/Linux-v5.4/arch/x86/events/amd/
  core.c
    377  for (i = 0; i < x86_pmu.num_counters; i++) { in __amd_put_nb_event_constraints()
    444  for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { in __amd_get_nb_event_constraints()
    487  for (i = 0; i < x86_pmu.num_counters; i++) { in amd_alloc_nb()
    500  if (!x86_pmu.amd_nb_constraints) in amd_pmu_cpu_prepare()
    519  if (!x86_pmu.amd_nb_constraints) in amd_pmu_cpu_starting()
    545  if (!x86_pmu.amd_nb_constraints) in amd_pmu_cpu_dead()
    581  if (counter & (1ULL << (x86_pmu.cntval_bits - 1))) in amd_pmu_wait_on_overflow()
    610  for (idx = 0; idx < x86_pmu.num_counters; idx++) { in amd_pmu_disable_all()
    875  static __initconst const struct x86_pmu amd_pmu = {
    919  x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; in amd_core_pmu_init()
    [all …]
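The AMD driver repeats the template idiom (line 875), and line 919 shows its complement: after the wholesale copy into the global, family-specific init retargets single callbacks in place. Sketched, with the family check assumed rather than quoted from the source:

    /* Callback override sketch.  amd_get_event_constraints_f15h is
     * the real symbol from the excerpt; the family test stands in
     * for the driver's actual model dispatch (an assumption). */
    static __init int amd_core_init_sketch(void)
    {
        if (boot_cpu_data.x86 == 0x15)   /* family 15h */
            x86_pmu.get_event_constraints =
                    amd_get_event_constraints_f15h;
        return 0;
    }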
/Linux-v5.4/arch/x86/kvm/vmx/
  pmu_intel.c
    265  struct x86_pmu_capability x86_pmu; in intel_pmu_refresh() (local)
    287  perf_get_x86_pmu_capability(&x86_pmu); in intel_pmu_refresh()
    290  x86_pmu.num_counters_gp); in intel_pmu_refresh()
    300  x86_pmu.num_counters_fixed); in intel_pmu_refresh()
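Note the `local` tag on line 265: in KVM, x86_pmu is a stack-local struct x86_pmu_capability, not the global driver object. intel_pmu_refresh() asks perf for the host PMU's limits and clamps what the guest's virtual PMU may advertise. A sketch of the clamp; guest_gp/guest_fixed are hypothetical stand-ins for the values KVM parses from the guest's CPUID leaf 0xA:

    /* Never let the guest see more counters than the host has. */
    static void clamp_guest_pmu(int *guest_gp, int *guest_fixed)
    {
        struct x86_pmu_capability cap;

        perf_get_x86_pmu_capability(&cap);
        *guest_gp    = min_t(int, *guest_gp, cap.num_counters_gp);
        *guest_fixed = min_t(int, *guest_fixed, cap.num_counters_fixed);
    }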
/Linux-v5.4/arch/x86/xen/
  pmu.c
    501  if (x86_pmu.handle_irq(&regs)) in xen_pmu_irq_handler()
/Linux-v5.4/Documentation/riscv/
  pmu.rst
    85  hwc->sample_period = x86_pmu.max_period;
/Linux-v5.4/arch/x86/kvm/
  x86.c
    5124  struct x86_pmu_capability x86_pmu; in kvm_init_msr_list() (local)
    5131  perf_get_x86_pmu_capability(&x86_pmu); in kvm_init_msr_list()
    5179  min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) in kvm_init_msr_list()
    5184  min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) in kvm_init_msr_list()