Lines matching refs: x86_pmu

Each entry below gives the source line number, the matching line, and the enclosing function.

46 struct x86_pmu x86_pmu __read_mostly;
71 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
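
The shift at line 71 is how x86_perf_event_update() copes with counters that are only cntval_bits wide: both raw values are shifted up to bit 63 and back down, so the delta is computed and sign-extended in the counter's own width. A condensed sketch of that pattern (the real function also retries with local64_cmpxchg() if it races with an NMI):

    int shift = 64 - x86_pmu.cntval_bits;
    u64 prev_raw_count, new_raw_count;
    s64 delta;

    prev_raw_count = local64_read(&hwc->prev_count);
    rdpmcl(hwc->event_base_rdpmc, new_raw_count);

    /*
     * Shift both values up so bit (cntval_bits - 1) becomes the sign bit,
     * subtract, then shift back down: the delta comes out right even
     * though the hardware counter is narrower than 64 bits.
     */
    delta = (new_raw_count << shift) - (prev_raw_count << shift);
    delta >>= shift;

    local64_add(delta, &event->count);
    local64_sub(delta, &hwc->period_left);
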
121 if (!x86_pmu.extra_regs) in x86_pmu_extra_regs()
124 for (er = x86_pmu.extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
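
x86_pmu.extra_regs (lines 121-124) points at a driver-provided table terminated by an entry whose msr field is zero, which is what the er->msr loop condition relies on. A simplified sketch of x86_pmu_extra_regs(), omitting some validity checks:

    struct extra_reg *er;

    if (!x86_pmu.extra_regs)
        return 0;

    for (er = x86_pmu.extra_regs; er->msr; er++) {
        /* skip entries that do not match this event's encoding */
        if (er->event != (config & er->config_mask))
            continue;
        if (event->attr.config1 & ~er->valid_mask)
            return -EINVAL;

        /* remember which extra MSR to program for this event */
        event->hw.extra_reg.idx    = er->idx;
        event->hw.extra_reg.config = event->attr.config1;
        event->hw.extra_reg.reg    = er->msr;
        break;
    }
    return 0;
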
151 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
156 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
167 i = x86_pmu.num_counters; in reserve_pmc_hardware()
180 for (i = 0; i < x86_pmu.num_counters; i++) { in release_pmc_hardware()
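
Lines 151-180 are the counter reservation dance: reserve_pmc_hardware() claims every performance counter MSR and every event-select MSR, and on failure unwinds whatever it already took (line 167 resets the index so the loop below it releases all counter MSRs). Roughly:

    for (i = 0; i < x86_pmu.num_counters; i++) {
        if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
            goto perfctr_fail;
    }

    for (i = 0; i < x86_pmu.num_counters; i++) {
        if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
            goto eventsel_fail;
    }

    return true;

    eventsel_fail:
        /* release the event-select MSRs taken so far ... */
        for (i--; i >= 0; i--)
            release_evntsel_nmi(x86_pmu_config_addr(i));

        /* ... then fall through and release every counter MSR */
        i = x86_pmu.num_counters;

    perfctr_fail:
        for (i--; i >= 0; i--)
            release_perfctr_nmi(x86_pmu_event_addr(i));

        return false;
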
204 for (i = 0; i < x86_pmu.num_counters; i++) { in check_hw_exists()
218 if (x86_pmu.num_counters_fixed) { in check_hw_exists()
223 for (i = 0; i < x86_pmu.num_counters_fixed; i++) { in check_hw_exists()
296 return x86_pmu.handle_irq != NULL; in x86_pmu_initialized()
377 if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) in x86_add_exclusive()
380 if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { in x86_add_exclusive()
382 for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { in x86_add_exclusive()
383 if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) in x86_add_exclusive()
386 atomic_inc(&x86_pmu.lbr_exclusive[what]); in x86_add_exclusive()
400 if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) in x86_del_exclusive()
403 atomic_dec(&x86_pmu.lbr_exclusive[what]); in x86_del_exclusive()
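
Lines 377-403 implement the mutual exclusion between the LBR, BTS and PT users. x86_pmu.lbr_exclusive[] holds one reference count per exclusive facility; taking one succeeds only if every other slot is idle, and lbr_pt_coexist lets PT opt out of the check entirely. Condensed (the real function also bumps active_events):

    static int x86_add_exclusive(unsigned int what)
    {
        int i;

        /* some PMUs allow PT to coexist with LBR/BTS */
        if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
            return 0;

        /* fast path: this facility is already held, just bump the count */
        if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
            mutex_lock(&pmc_reserve_mutex);
            for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
                if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
                    goto fail_unlock;
            }
            atomic_inc(&x86_pmu.lbr_exclusive[what]);
            mutex_unlock(&pmc_reserve_mutex);
        }
        return 0;

    fail_unlock:
        mutex_unlock(&pmc_reserve_mutex);
        return -EBUSY;
    }
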
414 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
425 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
428 attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); in x86_setup_perfctr()
433 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
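
In x86_setup_perfctr() (lines 414-433), non-sampling events default to the largest period the hardware supports, and a generic hardware event id is bounds-checked and then clamped with array_index_nospec() before it indexes the driver's event map, so a mispredicted bounds check cannot be used to read past the table. A trimmed sketch:

    if (!is_sampling_event(event)) {
        /* count as long as possible before the counter wraps */
        hwc->sample_period = x86_pmu.max_period;
        hwc->last_period   = hwc->sample_period;
        local64_set(&hwc->period_left, hwc->sample_period);
    }

    if (attr->config >= x86_pmu.max_events)
        return -EINVAL;

    /* clamp the index even under speculative execution */
    attr->config = array_index_nospec((unsigned long)attr->config,
                                      x86_pmu.max_events);

    config = x86_pmu.event_map(attr->config);
    if (config == 0)
        return -ENOENT;
    if (config == -1LL)
        return -EINVAL;

    hwc->config |= config;
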
481 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { in x86_pmu_max_precise()
485 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) in x86_pmu_max_precise()
488 if (x86_pmu.pebs_prec_dist) in x86_pmu_max_precise()
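
Lines 481-488 compute how many levels of precise_ip the PMU can honour: one for PEBS itself, one more if the IP can be fixed up (via LBR or PEBS format >= 2), and one more if precise distribution is supported. The function is short enough to quote nearly verbatim:

    static int x86_pmu_max_precise(void)
    {
        int precise = 0;

        /* Support for constant skid */
        if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
            precise++;

            /* Support for IP fixup */
            if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
                precise++;

            if (x86_pmu.pebs_prec_dist)
                precise++;
        }
        return precise;
    }
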
510 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
557 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
558 if (x86_pmu.limit_period(event, event->attr.sample_period) > in x86_pmu_hw_config()
606 return x86_pmu.hw_config(event); in __x86_pmu_event_init()
614 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_disable_all()
654 x86_pmu.disable_all(); in x86_pmu_disable()
662 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_enable_all()
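
The disable_all/enable_all paths (lines 614-662) walk every general-purpose counter and flip the enable bit in its event-select MSR, skipping counters that are not active on this CPU. The disable side looks roughly like:

    void x86_pmu_disable_all(void)
    {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
            u64 val;

            if (!test_bit(idx, cpuc->active_mask))
                continue;

            rdmsrl(x86_pmu_config_addr(idx), val);
            if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                continue;
            /* clear only the enable bit, keep the event selection */
            val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
            wrmsrl(x86_pmu_config_addr(idx), val);
        }
    }
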
887 if (x86_pmu.start_scheduling) in x86_schedule_events()
888 x86_pmu.start_scheduling(cpuc); in x86_schedule_events()
905 c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
939 int gpmax = x86_pmu.num_counters; in x86_schedule_events()
972 if (x86_pmu.commit_scheduling) in x86_schedule_events()
973 x86_pmu.commit_scheduling(cpuc, i, assign[i]); in x86_schedule_events()
982 if (x86_pmu.put_event_constraints) in x86_schedule_events()
983 x86_pmu.put_event_constraints(cpuc, e); in x86_schedule_events()
989 if (x86_pmu.stop_scheduling) in x86_schedule_events()
990 x86_pmu.stop_scheduling(cpuc); in x86_schedule_events()
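
x86_schedule_events() (lines 887-990) drives the per-vendor hooks as a small protocol: start_scheduling/stop_scheduling bracket the whole pass, get_event_constraints is mandatory and collects a constraint per event, and commit_scheduling or put_event_constraints runs depending on whether the assignment succeeded. Stripped of the actual assignment logic (and simplified: the real rollback only covers newly added events), the shape is:

    if (x86_pmu.start_scheduling)
        x86_pmu.start_scheduling(cpuc);

    for (i = 0; i < n; i++) {
        c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
        cpuc->event_constraint[i] = c;
    }

    /* ... try to place each event on a counter allowed by its constraint ... */

    if (!unsched && assign) {
        for (i = 0; i < n; i++) {
            /* let the driver record the final event->counter mapping */
            if (x86_pmu.commit_scheduling)
                x86_pmu.commit_scheduling(cpuc, i, assign[i]);
        }
    } else {
        for (i = 0; i < n; i++) {
            /* scheduling failed: hand the constraints back */
            if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
        }
    }

    if (x86_pmu.stop_scheduling)
        x86_pmu.stop_scheduling(cpuc);
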
1004 max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed; in collect_events()
1177 x86_pmu.enable_all(added); in x86_pmu_enable()
1218 if (left > x86_pmu.max_period) in x86_perf_event_set_period()
1219 left = x86_pmu.max_period; in x86_perf_event_set_period()
1221 if (x86_pmu.limit_period) in x86_perf_event_set_period()
1222 left = x86_pmu.limit_period(event, left); in x86_perf_event_set_period()
1232 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1239 if (x86_pmu.perfctr_second_write) { in x86_perf_event_set_period()
1241 (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
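
x86_perf_event_set_period() (lines 1218-1241) arms the counter in two's complement: the remaining period 'left' is clamped to the hardware maximum (and optionally to a driver-imposed limit), then -left, masked to the counter width, is written so the counter overflows after exactly 'left' increments. A reduced sketch:

    s64 left = local64_read(&hwc->period_left);

    if (left > x86_pmu.max_period)
        left = x86_pmu.max_period;

    /* some PMUs refuse very small periods; let the driver round up */
    if (x86_pmu.limit_period)
        left = x86_pmu.limit_period(event, left);

    local64_set(&hwc->prev_count, (u64)-left);

    /* the counter counts up from -left and interrupts when it wraps to 0 */
    wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

    /*
     * Some hardware needs the write issued a second time before the
     * full counter width sticks (perfctr_second_write).
     */
    if (x86_pmu.perfctr_second_write)
        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
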
1291 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_add()
1309 if (x86_pmu.add) { in x86_pmu_add()
1314 x86_pmu.add(event); in x86_pmu_add()
1343 x86_pmu.enable(event); in x86_pmu_start()
1355 if (!x86_pmu.num_counters) in perf_event_print_debug()
1363 if (x86_pmu.version >= 2) { in perf_event_print_debug()
1374 if (x86_pmu.pebs_constraints) { in perf_event_print_debug()
1378 if (x86_pmu.lbr_nr) { in perf_event_print_debug()
1385 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in perf_event_print_debug()
1398 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { in perf_event_print_debug()
1413 x86_pmu.disable(event); in x86_pmu_stop()
1463 if (x86_pmu.put_event_constraints) in x86_pmu_del()
1464 x86_pmu.put_event_constraints(cpuc, event); in x86_pmu_del()
1477 if (x86_pmu.del) { in x86_pmu_del()
1482 x86_pmu.del(event); in x86_pmu_del()
1506 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_handle_irq()
1513 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
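
In x86_pmu_handle_irq() (lines 1506-1513), the overflow test falls out of the programming scheme above: counters start at -left, so while they are still counting the top bit of the cntval_bits-wide value is set; once it clears, the counter has wrapped past zero and the event overflowed. Roughly (sample-data setup omitted):

    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
        if (!test_bit(idx, cpuc->active_mask))
            continue;

        event = cpuc->events[idx];

        val = x86_perf_event_update(event);
        /* top bit still set: the counter has not wrapped, no overflow */
        if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
            continue;

        /* overflow: re-arm the period and deliver the sample */
        handled++;
        if (!x86_perf_event_set_period(event))
            continue;

        if (perf_event_overflow(event, &data, regs))
            x86_pmu_stop(event, 0);
    }
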
1537 if (!x86_pmu.apic || !x86_pmu_initialized()) in perf_events_lapic_init()
1561 ret = x86_pmu.handle_irq(regs); in perf_event_nmi_handler()
1580 if (x86_pmu.cpu_prepare) in x86_pmu_prepare_cpu()
1581 return x86_pmu.cpu_prepare(cpu); in x86_pmu_prepare_cpu()
1587 if (x86_pmu.cpu_dead) in x86_pmu_dead_cpu()
1588 x86_pmu.cpu_dead(cpu); in x86_pmu_dead_cpu()
1606 if (x86_pmu.cpu_starting) in x86_pmu_starting_cpu()
1607 x86_pmu.cpu_starting(cpu); in x86_pmu_starting_cpu()
1613 if (x86_pmu.cpu_dying) in x86_pmu_dying_cpu()
1614 x86_pmu.cpu_dying(cpu); in x86_pmu_dying_cpu()
1623 x86_pmu.apic = 0; in pmu_check_apic()
1646 u64 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1652 return x86_pmu.events_sysfs_show(page, config); in events_sysfs_show()
1717 return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; in is_visible()
1784 x86_pmu.name = "HYGON"; in init_hw_perf_events()
1800 pr_cont("%s PMU driver.\n", x86_pmu.name); in init_hw_perf_events()
1802 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ in init_hw_perf_events()
1804 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
1807 if (!x86_pmu.intel_ctrl) in init_hw_perf_events()
1808 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; in init_hw_perf_events()
1814 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, in init_hw_perf_events()
1815 0, x86_pmu.num_counters, 0, 0); in init_hw_perf_events()
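
During init_hw_perf_events() (lines 1807-1815), two derived defaults are built from the counter count: if the vendor code did not provide a global-enable mask, intel_ctrl becomes a mask with one bit per general-purpose counter, and the "unconstrained" constraint is set up to allow an event on any of those counters:

    /* default global-enable mask: one bit per general-purpose counter */
    if (!x86_pmu.intel_ctrl)
        x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

    /* the "unconstrained" constraint lets an event use any GP counter */
    unconstrained = (struct event_constraint)
        __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                           0, x86_pmu.num_counters, 0, 0);
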
1817 x86_pmu_format_group.attrs = x86_pmu.format_attrs; in init_hw_perf_events()
1819 if (!x86_pmu.events_sysfs_show) in init_hw_perf_events()
1822 pmu.attr_update = x86_pmu.attr_update; in init_hw_perf_events()
1824 pr_info("... version: %d\n", x86_pmu.version); in init_hw_perf_events()
1825 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); in init_hw_perf_events()
1826 pr_info("... generic registers: %d\n", x86_pmu.num_counters); in init_hw_perf_events()
1827 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); in init_hw_perf_events()
1828 pr_info("... max period: %016Lx\n", x86_pmu.max_period); in init_hw_perf_events()
1829 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); in init_hw_perf_events()
1830 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); in init_hw_perf_events()
1870 if (x86_pmu.read) in x86_pmu_read()
1871 return x86_pmu.read(event); in x86_pmu_read()
1949 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_commit_txn()
2009 c = x86_pmu.get_event_constraints(fake_cpuc, 0, event); in validate_event()
2014 if (x86_pmu.put_event_constraints) in validate_event()
2015 x86_pmu.put_event_constraints(fake_cpuc, event); in validate_event()
2058 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); in validate_group()
2102 if (READ_ONCE(x86_pmu.attr_rdpmc) && in x86_pmu_event_init()
2152 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { in x86_pmu_event_idx()
2164 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); in get_attr_rdpmc()
2181 if (x86_pmu.attr_rdpmc_broken) in set_attr_rdpmc()
2184 if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) { in set_attr_rdpmc()
2197 x86_pmu.attr_rdpmc = val; in set_attr_rdpmc()
2242 if (x86_pmu.sched_task) in x86_pmu_sched_task()
2243 x86_pmu.sched_task(ctx, sched_in); in x86_pmu_sched_task()
2248 if (x86_pmu.check_microcode) in perf_check_microcode()
2249 x86_pmu.check_microcode(); in perf_check_microcode()
2254 if (x86_pmu.check_period && x86_pmu.check_period(event, value)) in x86_pmu_check_period()
2257 if (value && x86_pmu.limit_period) { in x86_pmu_check_period()
2258 if (x86_pmu.limit_period(event, value) > value) in x86_pmu_check_period()
2270 if (x86_pmu.aux_output_match) in x86_pmu_aux_output_match()
2271 return x86_pmu.aux_output_match(event); in x86_pmu_aux_output_match()
2315 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
2583 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
2584 cap->num_counters_gp = x86_pmu.num_counters; in perf_get_x86_pmu_capability()
2585 cap->num_counters_fixed = x86_pmu.num_counters_fixed; in perf_get_x86_pmu_capability()
2586 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2587 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2588 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
2589 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
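
The final group (lines 2583-2589) is perf_get_x86_pmu_capability(), which copies the PMU geometry out of x86_pmu into a struct x86_pmu_capability for other kernel code to consume. Assembled, the body is just a field-by-field copy; note that both counter widths are reported from the same cntval_bits value:

    cap->version            = x86_pmu.version;
    cap->num_counters_gp    = x86_pmu.num_counters;
    cap->num_counters_fixed = x86_pmu.num_counters_fixed;
    cap->bit_width_gp       = x86_pmu.cntval_bits;
    cap->bit_width_fixed    = x86_pmu.cntval_bits;
    cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
    cap->events_mask_len    = x86_pmu.events_mask_len;
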