| /Linux-v5.15/arch/x86/events/intel/ |
| D | lbr.c |
      197 if (pmi && x86_pmu.version >= 4) in __intel_pmu_lbr_enable()
      205 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask; in __intel_pmu_lbr_enable()
      249 for (i = 0; i < x86_pmu.lbr_nr; i++) in intel_pmu_lbr_reset_32()
      250 wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_32()
      257 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_reset_64()
      258 wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_64()
      259 wrmsrl(x86_pmu.lbr_to + i, 0); in intel_pmu_lbr_reset_64()
      260 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) in intel_pmu_lbr_reset_64()
      261 wrmsrl(x86_pmu.lbr_info + i, 0); in intel_pmu_lbr_reset_64()
      268 wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr); in intel_pmu_arch_lbr_reset()
      [all …]
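The reset hits above (lines 249-268) zero one FROM/TO MSR pair per LBR entry, plus the LBR_INFO MSR when the capability word reports that record format. A minimal userspace sketch of that loop, with plain arrays standing in for the `wrmsrl()` targets and the format constant assumed rather than taken from the kernel headers:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_LBR         32
#define LBR_FORMAT_INFO 0x05 /* assumed value; the kernel defines the real enum */

static uint64_t lbr_from[MAX_LBR], lbr_to[MAX_LBR], lbr_info[MAX_LBR];

/* mirrors the shape of intel_pmu_lbr_reset_64(): one write per MSR in the bank */
static void lbr_reset_64(unsigned int lbr_nr, unsigned int lbr_format)
{
    for (unsigned int i = 0; i < lbr_nr; i++) {
        lbr_from[i] = 0;
        lbr_to[i] = 0;
        if (lbr_format == LBR_FORMAT_INFO)
            lbr_info[i] = 0; /* only newer formats carry an LBR_INFO MSR */
    }
}

int main(void)
{
    lbr_from[3] = 0xdeadbeef;
    lbr_reset_64(MAX_LBR, LBR_FORMAT_INFO);
    printf("from[3] after reset: %#llx\n", (unsigned long long)lbr_from[3]);
    return 0;
}
```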
|
| D | core.c |
      2427 if (left == x86_pmu.max_period) { in icl_set_topdown_event_period()
      2591 x86_pmu.num_topdown_events - 1); in icl_update_topdown_event()
      2615 x86_pmu.update_topdown_event(event); in intel_pmu_read_topdown_event()
      2623 else if (is_topdown_count(event) && x86_pmu.update_topdown_event) in intel_pmu_read_event()
      2664 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) in intel_pmu_enable_fixed()
      2671 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { in intel_pmu_enable_fixed()
      2772 if (x86_pmu.version >= 2) { in intel_pmu_reset()
      2778 if (x86_pmu.lbr_nr) { in intel_pmu_reset()
      2825 if (x86_pmu.flags & PMU_FL_PEBS_ALL) in handle_pmi_common()
      2837 x86_pmu.drain_pebs(regs, &data); in handle_pmi_common()
      [all …]
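Lines 2615 and 2623 show the optional-callback pattern used throughout this file: a model-specific hook in the global ops struct stays NULL on CPUs without the feature, so call sites test the pointer before dispatching. A hedged sketch with invented names, not the kernel's types:

```c
#include <stdio.h>

struct pmu_ops {
    void (*update_topdown_event)(int event); /* NULL if the model lacks topdown */
};

static void icl_update_topdown(int event)
{
    printf("updating topdown metrics for event %d\n", event);
}

static struct pmu_ops pmu = { .update_topdown_event = icl_update_topdown };

static void pmu_read_event(int event, int is_topdown_count)
{
    /* mirrors line 2623: only dispatch when the hook was installed at init */
    if (is_topdown_count && pmu.update_topdown_event)
        pmu.update_topdown_event(event);
}

int main(void)
{
    pmu_read_event(7, 1);
    return 0;
}
```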
|
| D | ds.c |
      189 if (x86_pmu.pebs_no_tlb) { in load_latency_data()
      212 if (!x86_pmu.pebs_block) { in load_latency_data()
      404 size_t bsiz = x86_pmu.pebs_buffer_size; in alloc_pebs_buffer()
      408 if (!x86_pmu.pebs) in alloc_pebs_buffer()
      419 if (x86_pmu.intel_cap.pebs_format < 2) { in alloc_pebs_buffer()
      433 max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); in alloc_pebs_buffer()
      443 if (!x86_pmu.pebs) in release_pebs_buffer()
      451 ds_clear_cea(cea, x86_pmu.pebs_buffer_size); in release_pebs_buffer()
      452 dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); in release_pebs_buffer()
      463 if (!x86_pmu.bts) in alloc_bts_buffer()
      [all …]
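The alloc_pebs_buffer() hit at line 433 rounds the PEBS buffer down to a whole number of records with truncating integer division. A standalone illustration of that arithmetic, with both sizes made up for the demo:

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    size_t pebs_buffer_size = 65536; /* hypothetical x86_pmu.pebs_buffer_size */
    size_t pebs_record_size = 144;   /* hypothetical adaptive-PEBS record size */

    /* truncating division drops the partial record at the end of the buffer */
    size_t max = pebs_record_size * (pebs_buffer_size / pebs_record_size);

    printf("usable bytes: %zu (unused tail: %zu)\n",
           max, pebs_buffer_size - max);
    return 0;
}
```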
|
| D | p6.c |
      201 static __initconst const struct x86_pmu p6_pmu = {
      242 x86_pmu.attr_rdpmc_broken = 1; in p6_pmu_rdpmc_quirk()
      243 x86_pmu.attr_rdpmc = 0; in p6_pmu_rdpmc_quirk()
      249 x86_pmu = p6_pmu; in p6_pmu_init()
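Lines 201 and 249 show the init pattern shared by p6.c, knc.c, and p4.c below: a per-model template struct is copied wholesale into the single global `x86_pmu`, after which quirks patch individual fields. A simplified sketch with invented fields:

```c
#include <stdio.h>

struct x86_pmu_model {
    const char *name;
    int attr_rdpmc;
    int attr_rdpmc_broken;
};

static struct x86_pmu_model x86_pmu; /* the one live copy, filled at boot */

static const struct x86_pmu_model p6_pmu = { .name = "p6", .attr_rdpmc = 1 };

static void p6_pmu_rdpmc_quirk(void)
{
    /* mirrors lines 242-243: disable RDPMC on an affected stepping */
    x86_pmu.attr_rdpmc_broken = 1;
    x86_pmu.attr_rdpmc = 0;
}

int main(void)
{
    x86_pmu = p6_pmu; /* boot-time model selection, as at line 249 */
    p6_pmu_rdpmc_quirk();
    printf("%s rdpmc=%d\n", x86_pmu.name, x86_pmu.attr_rdpmc);
    return 0;
}
```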
|
| D | knc.c |
      290 static const struct x86_pmu knc_pmu __initconst = {
      316 x86_pmu = knc_pmu; in knc_pmu_init()
|
| D | p4.c |
      922 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_disable_all()
      1001 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_enable_all()
      1020 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_handle_irq()
      1039 if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) in p4_pmu_handle_irq()
      1312 static __initconst const struct x86_pmu p4_pmu = {
      1370 x86_pmu = p4_pmu; in p4_pmu_init()
      1381 for (i = 0; i < x86_pmu.num_counters; i++) { in p4_pmu_init()
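The p4_pmu_handle_irq() hit at line 1039 tests the top bit of the `cntval_bits`-wide counter: counters are armed with a negative value, so a still-set sign bit means the counter has not yet wrapped past zero (no overflow). A tiny standalone demo of that bit test:

```c
#include <stdint.h>
#include <stdio.h>

static int counter_still_counting(uint64_t val, unsigned int cntval_bits)
{
    /* top bit still set => the programmed negative value hasn't wrapped */
    return !!(val & (1ULL << (cntval_bits - 1)));
}

int main(void)
{
    unsigned int bits = 40;                 /* typical P4 counter width */
    uint64_t armed = (1ULL << bits) - 1000; /* i.e. -1000 in 40-bit space */

    printf("armed:   %d\n", counter_still_counting(armed, bits)); /* 1 */
    printf("wrapped: %d\n", counter_still_counting(500, bits));   /* 0 */
    return 0;
}
```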
|
| D | bts.c | 584 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) in bts_init()
|
| /Linux-v5.15/arch/x86/events/ |
| D | core.c |
      47 struct x86_pmu x86_pmu __read_mostly;
      63 DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
      64 DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
      65 DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
      66 DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
      67 DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);
      69 DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
      70 DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
      71 DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
      73 DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
      [all …]
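Lines 63-73 wrap each `x86_pmu` method in a static call (DEFINE_STATIC_CALL_NULL, later bound with static_call_update), which the kernel patches into direct calls at the call sites. A portable userspace analogue can only show the function-pointer version of the same idea; the patching machinery itself is kernel-only, and all names below are invented:

```c
#include <stdio.h>

struct pmu_ops {
    void (*enable_all)(int added);
    void (*disable_all)(void);
};

static void intel_enable_all(int added) { printf("enable_all(%d)\n", added); }
static void intel_disable_all(void)     { printf("disable_all\n"); }

/* stand-ins for the static-call trampolines, bound once at init time */
static void (*sc_enable_all)(int);
static void (*sc_disable_all)(void);

static void pmu_static_call_update(const struct pmu_ops *ops)
{
    sc_enable_all = ops->enable_all;
    sc_disable_all = ops->disable_all;
}

int main(void)
{
    const struct pmu_ops intel = { intel_enable_all, intel_disable_all };

    pmu_static_call_update(&intel); /* analogous to binding the NULL calls */
    sc_enable_all(0);
    sc_disable_all();
    return 0;
}
```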
|
| D | perf_event.h |
      675 typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
      695 bool __Fp = x86_pmu._field; \
      718 struct x86_pmu { struct
      962 __quirk.next = x86_pmu.quirks; \
      963 x86_pmu.quirks = &__quirk; \
      1021 extern struct x86_pmu x86_pmu __read_mostly;
      1033 return x86_pmu.lbr_sel_map && in x86_pmu_has_lbr_callstack()
      1034 x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0; in x86_pmu_has_lbr_callstack()
      1064 return x86_pmu.eventsel + (x86_pmu.addr_offset ? in x86_pmu_config_addr()
      1065 x86_pmu.addr_offset(index, true) : index); in x86_pmu_config_addr()
      [all …]
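The x86_pmu_config_addr() hit at lines 1064-1065 computes a counter's config MSR address as a base plus either a model-specific offset callback or the raw index (non-contiguous MSR layouts are the usual reason for the callback). A hedged sketch of that computation, with the base address and stride invented for the demo:

```c
#include <stdio.h>

static unsigned int eventsel_base = 0xc0010200; /* hypothetical base MSR */

/* optional hook: pretend consecutive counters sit two MSRs apart */
static int interleaved_addr_offset(int index, int eventsel)
{
    (void)eventsel;
    return index * 2;
}

static int (*addr_offset)(int index, int eventsel) = interleaved_addr_offset;

static unsigned int pmu_config_addr(int index)
{
    /* mirrors lines 1064-1065: use the hook when present, the index otherwise */
    return eventsel_base + (addr_offset ? addr_offset(index, 1) : index);
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("counter %d -> MSR %#x\n", i, pmu_config_addr(i));
    return 0;
}
```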
|
| /Linux-v5.15/arch/x86/events/zhaoxin/ |
| D | core.c |
      262 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); in zhaoxin_pmu_enable_all()
      370 if (x86_pmu.enabled_ack) in zhaoxin_pmu_handle_irq()
      427 if (x86_pmu.event_constraints) { in zhaoxin_get_event_constraints()
      428 for_each_event_constraint(c, x86_pmu.event_constraints) { in zhaoxin_get_event_constraints()
      459 static const struct x86_pmu zhaoxin_pmu __initconst = {
      498 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) { in zhaoxin_arch_events_quirk()
      529 x86_pmu = zhaoxin_pmu; in zhaoxin_pmu_init()
      532 x86_pmu.version = version; in zhaoxin_pmu_init()
      533 x86_pmu.num_counters = eax.split.num_counters; in zhaoxin_pmu_init()
      534 x86_pmu.cntval_bits = eax.split.bit_width; in zhaoxin_pmu_init()
      [all …]
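Lines 533-534 read CPUID leaf-0xA-style fields through a union of a raw 32-bit register and bitfields (the kernel's `union cpuid10_eax`). A minimal lookalike, with the field layout following the Intel SDM's leaf-0xA EAX encoding and the raw value made up:

```c
#include <stdint.h>
#include <stdio.h>

union cpuid10_eax {
    struct {
        uint32_t version_id:8;
        uint32_t num_counters:8;
        uint32_t bit_width:8;
        uint32_t mask_length:8;
    } split;
    uint32_t full;
};

int main(void)
{
    /* made-up raw value: version 2, 4 counters, 48-bit width, 7 events */
    union cpuid10_eax eax = { .full = 0x07300402 };

    printf("version=%u counters=%u width=%u\n",
           eax.split.version_id, eax.split.num_counters, eax.split.bit_width);
    return 0;
}
```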
|
| /Linux-v5.15/arch/x86/events/amd/ |
| D | core.c |
      319 if (!(x86_pmu.flags & PMU_FL_PAIR)) in amd_is_pair_event_code()
      343 if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw)) in amd_core_hw_config()
      396 for (i = 0; i < x86_pmu.num_counters; i++) { in __amd_put_nb_event_constraints()
      463 for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { in __amd_get_nb_event_constraints()
      506 for (i = 0; i < x86_pmu.num_counters; i++) { in amd_alloc_nb()
      519 if (!x86_pmu.amd_nb_constraints) in amd_pmu_cpu_prepare()
      538 if (!x86_pmu.amd_nb_constraints) in amd_pmu_cpu_starting()
      564 if (!x86_pmu.amd_nb_constraints) in amd_pmu_cpu_dead()
      600 if (counter & (1ULL << (x86_pmu.cntval_bits - 1))) in amd_pmu_wait_on_overflow()
      629 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in amd_pmu_disable_all()
      [all …]
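Lines 319 and 343 gate counter pairing on a capability bit in `x86_pmu.flags`. A sketch of that flag-test pattern; the flag value and the event code checked here are invented, not the kernel's:

```c
#include <stdio.h>

#define PMU_FL_PAIR (1u << 6) /* illustrative bit position only */

static unsigned int pmu_flags = PMU_FL_PAIR;

static int is_pair_event_code(unsigned int config)
{
    if (!(pmu_flags & PMU_FL_PAIR))
        return 0;                 /* this model can't pair counters */
    return (config & 0xfff) == 0x003; /* hypothetical paired event code */
}

int main(void)
{
    printf("0x003 pairs: %d\n", is_pair_event_code(0x003));
    printf("0x076 pairs: %d\n", is_pair_event_code(0x076));
    return 0;
}
```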
|
| /Linux-v5.15/arch/x86/kvm/vmx/ |
| D | pmu_intel.c |
      469 struct x86_pmu_capability x86_pmu; in intel_pmu_refresh() local
      491 perf_get_x86_pmu_capability(&x86_pmu); in intel_pmu_refresh()
      494 x86_pmu.num_counters_gp); in intel_pmu_refresh()
      495 eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp); in intel_pmu_refresh()
      497 eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len); in intel_pmu_refresh()
      506 x86_pmu.num_counters_fixed); in intel_pmu_refresh()
      508 edx.split.bit_width_fixed, x86_pmu.bit_width_fixed); in intel_pmu_refresh()
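Here `x86_pmu` is a local `struct x86_pmu_capability`, not the global ops struct: intel_pmu_refresh() clamps what the guest's CPUID.0xA advertises to the host's real PMU capability via `min_t()`. A standalone sketch of that clamping, with invented host and guest numbers:

```c
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
    /* host capability, as perf_get_x86_pmu_capability() would report it */
    int host_counters_gp = 8, host_bit_width = 48;
    /* what userspace asked to expose to the guest */
    int guest_counters_gp = 32, guest_bit_width = 64;

    /* never advertise more than the host can actually deliver */
    guest_counters_gp = min_int(guest_counters_gp, host_counters_gp);
    guest_bit_width = min_int(guest_bit_width, host_bit_width);

    printf("guest sees %d counters of %d bits\n",
           guest_counters_gp, guest_bit_width);
    return 0;
}
```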
|
| /Linux-v5.15/Documentation/translations/zh_CN/riscv/ |
| D | pmu.rst | 84 hwc->sample_period = x86_pmu.max_period;
|
| /Linux-v5.15/arch/x86/xen/ |
| D | pmu.c | 501 if (x86_pmu.handle_irq(®s)) in xen_pmu_irq_handler()
|
| /Linux-v5.15/Documentation/riscv/ |
| D | pmu.rst | 85 hwc->sample_period = x86_pmu.max_period;
|
| /Linux-v5.15/arch/x86/kvm/ |
| D | x86.c |
      6183 struct x86_pmu_capability x86_pmu; in kvm_init_msr_list() local
      6190 perf_get_x86_pmu_capability(&x86_pmu); in kvm_init_msr_list()
      6243 min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) in kvm_init_msr_list()
      6248 min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) in kvm_init_msr_list()
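As in pmu_intel.c above, kvm_init_msr_list() queries the host capability and only keeps perf-counter MSRs up to min(INTEL_PMC_MAX_GENERIC, host counters). A sketch of that filtering loop; the max constant and base MSR number are stand-ins for the demo:

```c
#include <stdio.h>

#define INTEL_PMC_MAX_GENERIC 32
#define MSR_PERFCTR0          0xc1 /* used illustratively as the counter base */

int main(void)
{
    int host_counters_gp = 4; /* pretend perf reported 4 GP counters */
    int limit = host_counters_gp < INTEL_PMC_MAX_GENERIC ?
                host_counters_gp : INTEL_PMC_MAX_GENERIC;

    for (int n = 0; n < INTEL_PMC_MAX_GENERIC; n++) {
        if (n >= limit)
            continue; /* don't advertise MSRs the host can't back */
        printf("exposing MSR %#x\n", MSR_PERFCTR0 + n);
    }
    return 0;
}
```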
|