Lines matching full:pmu (identifier cross-reference hits; all matches are in the Linux kernel's Intel uncore PMU header, arch/x86/events/intel/uncore.h):

85 	struct pmu *pmu; /* for custom pmu ops */  (member of struct intel_uncore_type)
87 	 * Uncore PMU would store relevant platform topology configuration here  (comment in struct intel_uncore_type)
119 	struct pmu pmu;  (member of struct intel_uncore_pmu)
148 	struct intel_uncore_pmu *pmu;  (member of struct intel_uncore_box)
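The three member hits above come from three nested structures. A minimal sketch of how they relate, with most fields elided (field set and ordering approximate, based on the definitions in current kernels):

struct intel_uncore_type {
        const char *name;
        struct pmu *pmu;                /* for custom pmu ops */
        /* box_ctl/event_ctl/perf_ctr bases, per-box offsets, ops table, ... */
};

struct intel_uncore_pmu {
        struct pmu pmu;                 /* embedded core perf_events PMU */
        struct intel_uncore_type *type; /* shared per-type configuration */
        int pmu_idx;                    /* instance index within the type */
        /* ... */
};

struct intel_uncore_box {
        struct intel_uncore_pmu *pmu;   /* back-pointer used by every helper below */
        /* per-box hardware state, flags, active events, ... */
};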
201 return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); in dev_to_uncore_pmu()
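The hit at line 201 is the standard container_of() recovery of an embedding structure: dev_get_drvdata() returns the embedded struct pmu pointer that was stored as driver data, and container_of() subtracts the member's offset to yield the enclosing intel_uncore_pmu (the same idiom appears again at line 539 below). A hypothetical caller, for illustration only:

/* Hypothetical sysfs handler; dev is the device behind the uncore PMU. */
static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);

        /* pmu->type, pmu->pmu_idx, etc. are now reachable. */
        return sysfs_emit(buf, "%d\n", pmu->pmu_idx);
}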
241 if (offset < box->pmu->type->mmio_map_size) in uncore_mmio_is_valid_offset()
245 offset, box->pmu->type->name); in uncore_mmio_is_valid_offset()
253 return box->pmu->type->box_ctl + in uncore_mmio_box_ctl()
254 box->pmu->type->mmio_offset * box->pmu->pmu_idx; in uncore_mmio_box_ctl()
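Lines 241-254 are the MMIO flavor of the addressing scheme: a bounds check against the mapped register window (the hit at 245 is the tail end of a warning that prints the offending offset and the box type's name), plus a per-instance control-block address at a fixed stride. A hedged reconstruction; the warning text is paraphrased, since only its arguments appear in the hits:

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
                                               unsigned long offset)
{
        if (offset < box->pmu->type->mmio_map_size)
                return true;

        pr_warn_once("perf uncore: invalid offset 0x%lx for %s\n",
                     offset, box->pmu->type->name);
        return false;
}

static inline unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
        return box->pmu->type->box_ctl +
               box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}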
259 return box->pmu->type->box_ctl; in uncore_pci_box_ctl()
264 return box->pmu->type->fixed_ctl; in uncore_pci_fixed_ctl()
269 return box->pmu->type->fixed_ctr; in uncore_pci_fixed_ctr()
276 return idx * 8 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
278 return idx * 4 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
284 return idx * 8 + box->pmu->type->perf_ctr; in uncore_pci_perf_ctr()
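The two return sites at 276/278 differ only in stride: PCI control registers sit either 8 or 4 bytes apart depending on a per-box flag (UNCORE_BOX_FLAG_CTL_OFFS8 in current kernels), while the 64-bit counters at 284 always use an 8-byte stride. A hedged reconstruction of both helpers:

static inline
unsigned int uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
        if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
                return idx * 8 + box->pmu->type->event_ctl;
        return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned int uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
        return idx * 8 + box->pmu->type->perf_ctr;
}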
289 struct intel_uncore_pmu *pmu = box->pmu; in uncore_msr_box_offset() (local variable)
290 return pmu->type->msr_offsets ? in uncore_msr_box_offset()
291 pmu->type->msr_offsets[pmu->pmu_idx] : in uncore_msr_box_offset()
292 pmu->type->msr_offset * pmu->pmu_idx; in uncore_msr_box_offset()
297 if (!box->pmu->type->box_ctl) in uncore_msr_box_ctl()
299 return box->pmu->type->box_ctl + uncore_msr_box_offset(box); in uncore_msr_box_ctl()
304 if (!box->pmu->type->fixed_ctl) in uncore_msr_fixed_ctl()
306 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); in uncore_msr_fixed_ctl()
311 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); in uncore_msr_fixed_ctr()
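Lines 289-311 share one addressing scheme: a box's MSR displacement comes either from a per-instance lookup table (msr_offsets) or from a uniform stride (msr_offset * pmu_idx), and the control and fixed-counter helpers add that displacement to the type-wide base registers. Reassembled from the fragments above (the bare return for a missing box_ctl is inferred to be 0, since the helpers yield plain MSR addresses):

static inline unsigned int uncore_msr_box_offset(struct intel_uncore_box *box)
{
        struct intel_uncore_pmu *pmu = box->pmu;

        /* A per-instance table wins; otherwise instances are evenly spaced. */
        return pmu->type->msr_offsets ?
               pmu->type->msr_offsets[pmu->pmu_idx] :
               pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned int uncore_msr_box_ctl(struct intel_uncore_box *box)
{
        if (!box->pmu->type->box_ctl)
                return 0;
        return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}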
361 struct intel_uncore_pmu *pmu = box->pmu; in uncore_freerunning_counter() (local variable)
363 return pmu->type->freerunning[type].counter_base + in uncore_freerunning_counter()
364 pmu->type->freerunning[type].counter_offset * idx + in uncore_freerunning_counter()
365 (pmu->type->freerunning[type].box_offsets ? in uncore_freerunning_counter()
366 pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : in uncore_freerunning_counter()
367 pmu->type->freerunning[type].box_offset * pmu->pmu_idx); in uncore_freerunning_counter()
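The free-running counter address at 361-367 composes three terms: a per-type base, a per-counter stride, and a per-box displacement that, as with the MSR helpers, comes from either a lookup table or a uniform stride. Reassembled; how type and idx are decoded from the event is not visible in these hits, so the two decoding helpers named here are an assumption modeled on current kernels:

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        unsigned int type = uncore_freerunning_type(event->hw.config);
        unsigned int idx = uncore_freerunning_idx(event->hw.config);
        struct intel_uncore_pmu *pmu = box->pmu;

        return pmu->type->freerunning[type].counter_base +
               pmu->type->freerunning[type].counter_offset * idx +
               (pmu->type->freerunning[type].box_offsets ?
                pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
                pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}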
375 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_event_ctl()
377 return box->pmu->type->event_ctl + in uncore_msr_event_ctl()
378 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_event_ctl()
388 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_perf_ctr()
390 return box->pmu->type->perf_ctr + in uncore_msr_perf_ctr()
391 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_perf_ctr()
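pair_ctr_ctl at 375-391 selects between two MSR layouts: when set, each counter's control and data registers are interleaved (ctl0, ctr0, ctl1, ctr1, ...), so the index steps by two; when clear, controls and counters form two contiguous banks. A sketch of the general path (the dangling '+' at 378/391 is completed with the per-box offset from above, which is an inference; the first return site in each pair, at 375/388, belongs to a special-cased branch not visible in these hits):

static inline
unsigned int uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
        return box->pmu->type->event_ctl +
               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
               uncore_msr_box_offset(box);
}

static inline
unsigned int uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
        return box->pmu->type->perf_ctr +
               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
               uncore_msr_box_offset(box);
}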
434 return box->pmu->type->perf_ctr_bits; in uncore_perf_ctr_bits()
439 return box->pmu->type->fixed_ctr_bits; in uncore_fixed_ctr_bits()
448 return box->pmu->type->freerunning[type].bits; in uncore_freerunning_bits()
456 return box->pmu->type->freerunning[type].num_counters; in uncore_num_freerunning()
462 return box->pmu->type->num_freerunning_types; in uncore_num_freerunning_types()
477 return box->pmu->type->num_counters; in uncore_num_counters()
501 box->pmu->type->ops->disable_event(box, event); in uncore_disable_event()
507 box->pmu->type->ops->enable_event(box, event); in uncore_enable_event()
513 return box->pmu->type->ops->read_counter(box, event); in uncore_read_counter()
519 if (box->pmu->type->ops->init_box) in uncore_box_init()
520 box->pmu->type->ops->init_box(box); in uncore_box_init()
527 if (box->pmu->type->ops->exit_box) in uncore_box_exit()
528 box->pmu->type->ops->exit_box(box); in uncore_box_exit()
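Lines 501-528 dispatch through a per-type ops table rather than touching hardware directly; this is what lets one set of generic helpers drive MSR-, PCI-, and MMIO-based boxes alike. Note that enable_event/disable_event/read_counter are called unconditionally, while init_box/exit_box are null-checked, i.e. optional. A trimmed sketch of the ops table these helpers assume:

struct intel_uncore_ops {
        void (*init_box)(struct intel_uncore_box *);    /* optional */
        void (*exit_box)(struct intel_uncore_box *);    /* optional */
        void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
        void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
        u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
        /* enable_box, disable_box, hw_config, ... elided */
};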
539 return container_of(event->pmu, struct intel_uncore_pmu, pmu); in uncore_event_to_pmu()
547 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
564 void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);