Lines Matching +full:event +full:-name

2  * SPDX-License-Identifier: MIT
4 * Copyright © 2017-2018 Intel Corporation
30 static unsigned int i915_pmu_target_cpu = -1;
37 static u8 engine_event_sample(struct perf_event *event) in engine_event_sample() argument
39 return engine_config_sample(event->attr.config); in engine_event_sample()
42 static u8 engine_event_class(struct perf_event *event) in engine_event_class() argument
44 return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff; in engine_event_class()
47 static u8 engine_event_instance(struct perf_event *event) in engine_event_instance() argument
49 return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff; in engine_event_instance()
76 return -1; in other_bit()
95 static bool is_engine_event(struct perf_event *event) in is_engine_event() argument
97 return is_engine_config(event->attr.config); in is_engine_event()
100 static unsigned int event_bit(struct perf_event *event) in event_bit() argument
102 return config_bit(event->attr.config); in event_bit()
115 enable = pmu->enable; in pmu_needs_timer()
126 * When the GPU is idle per-engine counters do not need to be in pmu_needs_timer()
135 else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS) in pmu_needs_timer()
146 struct drm_i915_private *i915 = gt->i915; in __get_rc6()
149 val = intel_rc6_residency_ns(&gt->rc6, in __get_rc6()
155 val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p); in __get_rc6()
158 val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp); in __get_rc6()
170 struct drm_i915_private *i915 = gt->i915; in get_rc6()
171 struct i915_pmu *pmu = &i915->pmu; in get_rc6()
182 spin_lock_irqsave(&pmu->lock, flags); in get_rc6()
185 pmu->sample[__I915_SAMPLE_RC6].cur = val; in get_rc6()
194 val = ktime_since_raw(pmu->sleep_last); in get_rc6()
195 val += pmu->sample[__I915_SAMPLE_RC6].cur; in get_rc6()
198 if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) in get_rc6()
199 val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; in get_rc6()
201 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; in get_rc6()
203 spin_unlock_irqrestore(&pmu->lock, flags); in get_rc6()
213 with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) { in init_rc6()
214 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in init_rc6()
215 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = in init_rc6()
216 pmu->sample[__I915_SAMPLE_RC6].cur; in init_rc6()
217 pmu->sleep_last = ktime_get_raw(); in init_rc6()
223 struct i915_pmu *pmu = &i915->pmu; in park_rc6()
225 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in park_rc6()
226 pmu->sleep_last = ktime_get_raw(); in park_rc6()
231 if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { in __i915_pmu_maybe_start_timer()
232 pmu->timer_enabled = true; in __i915_pmu_maybe_start_timer()
233 pmu->timer_last = ktime_get(); in __i915_pmu_maybe_start_timer()
234 hrtimer_start_range_ns(&pmu->timer, in __i915_pmu_maybe_start_timer()
242 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_parked()
244 if (!pmu->base.event_init) in i915_pmu_gt_parked()
247 spin_lock_irq(&pmu->lock); in i915_pmu_gt_parked()
255 pmu->timer_enabled = pmu_needs_timer(pmu, false); in i915_pmu_gt_parked()
257 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_parked()
262 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_unparked()
264 if (!pmu->base.event_init) in i915_pmu_gt_unparked()
267 spin_lock_irq(&pmu->lock); in i915_pmu_gt_unparked()
270 * Re-enable sampling timer when GPU goes active. in i915_pmu_gt_unparked()
274 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_unparked()
280 sample->cur += val; in add_sample()
295 struct intel_engine_pmu *pmu = &engine->pmu; in engine_sample()
304 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); in engine_sample()
306 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); in engine_sample()
313 * While waiting on a semaphore or event, MI_MODE reports the in engine_sample()
325 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); in engine_sample()
331 struct drm_i915_private *i915 = gt->i915; in engines_sample()
336 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) in engines_sample()
347 spin_lock_irqsave(&engine->uncore->lock, flags); in engines_sample()
349 spin_unlock_irqrestore(&engine->uncore->lock, flags); in engines_sample()
361 sample->cur += mul_u32_u32(val, mul); in add_sample_mult()
366 return pmu->enable & in frequency_sampling_enabled()
374 struct drm_i915_private *i915 = gt->i915; in frequency_sample()
375 struct intel_uncore *uncore = gt->uncore; in frequency_sample()
376 struct i915_pmu *pmu = &i915->pmu; in frequency_sample()
377 struct intel_rps *rps = &gt->rps; in frequency_sample()
386 if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) { in frequency_sample()
394 * mmio power well, then it will return 0 -- in which in frequency_sample()
402 val = rps->cur_freq; in frequency_sample()
404 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], in frequency_sample()
408 if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) { in frequency_sample()
409 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], in frequency_sample()
421 struct i915_pmu *pmu = &i915->pmu; in i915_sample()
422 struct intel_gt *gt = &i915->gt; in i915_sample()
426 if (!READ_ONCE(pmu->timer_enabled)) in i915_sample()
430 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); in i915_sample()
431 pmu->timer_last = now; in i915_sample()
436 * grabbing the forcewake. However the potential error from timer call- in i915_sample()
447 static void i915_pmu_event_destroy(struct perf_event *event) in i915_pmu_event_destroy() argument
450 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_destroy()
452 drm_WARN_ON(&i915->drm, event->parent); in i915_pmu_event_destroy()
454 drm_dev_put(&i915->drm); in i915_pmu_event_destroy()
466 if (GRAPHICS_VER(engine->i915) < 6) in engine_event_status()
467 return -ENODEV; in engine_event_status()
470 return -ENOENT; in engine_event_status()
479 struct intel_gt *gt = &i915->gt; in config_status()
485 return -ENODEV; in config_status()
489 return -ENODEV; in config_status()
494 if (!gt->rc6.supported) in config_status()
495 return -ENODEV; in config_status()
500 return -ENOENT; in config_status()
506 static int engine_event_init(struct perf_event *event) in engine_event_init() argument
509 container_of(event->pmu, typeof(*i915), pmu.base); in engine_event_init()
512 engine = intel_engine_lookup_user(i915, engine_event_class(event), in engine_event_init()
513 engine_event_instance(event)); in engine_event_init()
515 return -ENODEV; in engine_event_init()
517 return engine_event_status(engine, engine_event_sample(event)); in engine_event_init()
520 static int i915_pmu_event_init(struct perf_event *event) in i915_pmu_event_init() argument
523 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_init()
524 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_init()
527 if (pmu->closed) in i915_pmu_event_init()
528 return -ENODEV; in i915_pmu_event_init()
530 if (event->attr.type != event->pmu->type) in i915_pmu_event_init()
531 return -ENOENT; in i915_pmu_event_init()
534 if (event->attr.sample_period) /* no sampling */ in i915_pmu_event_init()
535 return -EINVAL; in i915_pmu_event_init()
537 if (has_branch_stack(event)) in i915_pmu_event_init()
538 return -EOPNOTSUPP; in i915_pmu_event_init()
540 if (event->cpu < 0) in i915_pmu_event_init()
541 return -EINVAL; in i915_pmu_event_init()
544 if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask)) in i915_pmu_event_init()
545 return -EINVAL; in i915_pmu_event_init()
547 if (is_engine_event(event)) in i915_pmu_event_init()
548 ret = engine_event_init(event); in i915_pmu_event_init()
550 ret = config_status(i915, event->attr.config); in i915_pmu_event_init()
554 if (!event->parent) { in i915_pmu_event_init()
555 drm_dev_get(&i915->drm); in i915_pmu_event_init()
556 event->destroy = i915_pmu_event_destroy; in i915_pmu_event_init()
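The event_init/add/start plumbing listed above is what user space reaches through perf_event_open(2). Below is a minimal, hypothetical user-space sketch (not part of i915_pmu.c): it assumes the I915_PMU_RC6_RESIDENCY config value from the uapi header include/uapi/drm/i915_drm.h, that the integrated GPU registered under the default "i915" name, that CPU 0 is in the PMU's cpumask, and that the caller has sufficient perf privileges.

	/* Hypothetical example, not part of the driver sources above. */
	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>
	#include <drm/i915_drm.h>		/* I915_PMU_RC6_RESIDENCY */

	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
				    int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int type, fd;
		FILE *f;

		/* Dynamic PMU type; the name comes from i915_pmu_register(). */
		f = fopen("/sys/bus/event_source/devices/i915/type", "r");
		if (!f || fscanf(f, "%d", &type) != 1)
			return 1;
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.type = type;
		attr.size = sizeof(attr);
		attr.config = I915_PMU_RC6_RESIDENCY;

		/*
		 * i915_pmu_event_init() rejects per-task events and sampling,
		 * so open system-wide (pid == -1) on a CPU from the cpumask.
		 */
		fd = perf_event_open(&attr, -1, 0, -1, 0);
		if (fd < 0)
			return 1;

		sleep(1);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("rc6 residency: %llu ns\n", (unsigned long long)count);
		close(fd);
		return 0;
	}

The same counter is reachable from the perf tool as i915/rc6-residency/ once the event aliases generated below are registered.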
562 static u64 __i915_pmu_event_read(struct perf_event *event) in __i915_pmu_event_read() argument
565 container_of(event->pmu, typeof(*i915), pmu.base); in __i915_pmu_event_read()
566 struct i915_pmu *pmu = &i915->pmu; in __i915_pmu_event_read()
569 if (is_engine_event(event)) { in __i915_pmu_event_read()
570 u8 sample = engine_event_sample(event); in __i915_pmu_event_read()
574 engine_event_class(event), in __i915_pmu_event_read()
575 engine_event_instance(event)); in __i915_pmu_event_read()
577 if (drm_WARN_ON_ONCE(&i915->drm, !engine)) { in __i915_pmu_event_read()
586 val = engine->pmu.sample[sample].cur; in __i915_pmu_event_read()
589 switch (event->attr.config) { in __i915_pmu_event_read()
592 div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, in __i915_pmu_event_read()
597 div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, in __i915_pmu_event_read()
601 val = READ_ONCE(pmu->irq_count); in __i915_pmu_event_read()
604 val = get_rc6(&i915->gt); in __i915_pmu_event_read()
607 val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt)); in __i915_pmu_event_read()
615 static void i915_pmu_event_read(struct perf_event *event) in i915_pmu_event_read() argument
618 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_read()
619 struct hw_perf_event *hwc = &event->hw; in i915_pmu_event_read()
620 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_read()
623 if (pmu->closed) { in i915_pmu_event_read()
624 event->hw.state = PERF_HES_STOPPED; in i915_pmu_event_read()
628 prev = local64_read(&hwc->prev_count); in i915_pmu_event_read()
629 new = __i915_pmu_event_read(event); in i915_pmu_event_read()
631 if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) in i915_pmu_event_read()
634 local64_add(new - prev, &event->count); in i915_pmu_event_read()
637 static void i915_pmu_enable(struct perf_event *event) in i915_pmu_enable() argument
640 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_enable()
641 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_enable()
645 bit = event_bit(event); in i915_pmu_enable()
646 if (bit == -1) in i915_pmu_enable()
649 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_enable()
653 * the event reference counter. in i915_pmu_enable()
655 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); in i915_pmu_enable()
656 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_enable()
657 GEM_BUG_ON(pmu->enable_count[bit] == ~0); in i915_pmu_enable()
659 pmu->enable |= BIT_ULL(bit); in i915_pmu_enable()
660 pmu->enable_count[bit]++; in i915_pmu_enable()
668 * For per-engine events the bitmask and reference counting in i915_pmu_enable()
671 if (is_engine_event(event)) { in i915_pmu_enable()
672 u8 sample = engine_event_sample(event); in i915_pmu_enable()
676 engine_event_class(event), in i915_pmu_enable()
677 engine_event_instance(event)); in i915_pmu_enable()
679 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != in i915_pmu_enable()
681 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != in i915_pmu_enable()
683 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_enable()
684 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_enable()
685 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); in i915_pmu_enable()
687 engine->pmu.enable |= BIT(sample); in i915_pmu_enable()
688 engine->pmu.enable_count[sample]++; in i915_pmu_enable()
691 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_enable()
696 * for all listeners. Even when the event was already enabled and has in i915_pmu_enable()
697 * an existing non-zero value. in i915_pmu_enable()
699 local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); in i915_pmu_enable()
702 static void i915_pmu_disable(struct perf_event *event) in i915_pmu_disable() argument
705 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_disable()
706 unsigned int bit = event_bit(event); in i915_pmu_disable()
707 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_disable()
710 if (bit == -1) in i915_pmu_disable()
713 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_disable()
715 if (is_engine_event(event)) { in i915_pmu_disable()
716 u8 sample = engine_event_sample(event); in i915_pmu_disable()
720 engine_event_class(event), in i915_pmu_disable()
721 engine_event_instance(event)); in i915_pmu_disable()
723 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_disable()
724 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_disable()
725 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); in i915_pmu_disable()
729 * bitmask when the last listener on an event goes away. in i915_pmu_disable()
731 if (--engine->pmu.enable_count[sample] == 0) in i915_pmu_disable()
732 engine->pmu.enable &= ~BIT(sample); in i915_pmu_disable()
735 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_disable()
736 GEM_BUG_ON(pmu->enable_count[bit] == 0); in i915_pmu_disable()
739 * bitmask when the last listener on an event goes away. in i915_pmu_disable()
741 if (--pmu->enable_count[bit] == 0) { in i915_pmu_disable()
742 pmu->enable &= ~BIT_ULL(bit); in i915_pmu_disable()
743 pmu->timer_enabled &= pmu_needs_timer(pmu, true); in i915_pmu_disable()
746 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_disable()
749 static void i915_pmu_event_start(struct perf_event *event, int flags) in i915_pmu_event_start() argument
752 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_start()
753 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_start()
755 if (pmu->closed) in i915_pmu_event_start()
758 i915_pmu_enable(event); in i915_pmu_event_start()
759 event->hw.state = 0; in i915_pmu_event_start()
762 static void i915_pmu_event_stop(struct perf_event *event, int flags) in i915_pmu_event_stop() argument
765 i915_pmu_event_read(event); in i915_pmu_event_stop()
766 i915_pmu_disable(event); in i915_pmu_event_stop()
767 event->hw.state = PERF_HES_STOPPED; in i915_pmu_event_stop()
770 static int i915_pmu_event_add(struct perf_event *event, int flags) in i915_pmu_event_add() argument
773 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_add()
774 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_add()
776 if (pmu->closed) in i915_pmu_event_add()
777 return -ENODEV; in i915_pmu_event_add()
780 i915_pmu_event_start(event, flags); in i915_pmu_event_add()
785 static void i915_pmu_event_del(struct perf_event *event, int flags) in i915_pmu_event_del() argument
787 i915_pmu_event_stop(event, PERF_EF_UPDATE); in i915_pmu_event_del()
790 static int i915_pmu_event_event_idx(struct perf_event *event) in i915_pmu_event_event_idx() argument
806 return sprintf(buf, "%s\n", eattr->str); in i915_pmu_format_show()
816 I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
821 .name = "format",
836 return sprintf(buf, "config=0x%lx\n", eattr->val); in i915_pmu_event_show()
859 .name = (__name), \
866 .name = (__name), \
870 add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config) in add_i915_attr() argument
872 sysfs_attr_init(&attr->attr.attr); in add_i915_attr()
873 attr->attr.attr.name = name; in add_i915_attr()
874 attr->attr.attr.mode = 0444; in add_i915_attr()
875 attr->attr.show = i915_pmu_event_show; in add_i915_attr()
876 attr->val = config; in add_i915_attr()
882 add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name, in add_pmu_attr() argument
885 sysfs_attr_init(&attr->attr.attr); in add_pmu_attr()
886 attr->attr.attr.name = name; in add_pmu_attr()
887 attr->attr.attr.mode = 0444; in add_pmu_attr()
888 attr->attr.show = perf_event_sysfs_show; in add_pmu_attr()
889 attr->event_str = str; in add_pmu_attr()
900 const char *name; in create_event_attributes() member
903 __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"), in create_event_attributes()
904 __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"), in create_event_attributes()
906 __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), in create_event_attributes()
907 __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"), in create_event_attributes()
911 char *name; in create_event_attributes() member
956 /* Initialize supported non-engine counters. */ in create_event_attributes()
963 str = kstrdup(events[i].name, GFP_KERNEL); in create_event_attributes()
967 *attr_iter++ = &i915_iter->attr.attr; in create_event_attributes()
971 str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name); in create_event_attributes()
975 *attr_iter++ = &pmu_iter->attr.attr; in create_event_attributes()
989 str = kasprintf(GFP_KERNEL, "%s-%s", in create_event_attributes()
990 engine->name, engine_events[i].name); in create_event_attributes()
994 *attr_iter++ = &i915_iter->attr.attr; in create_event_attributes()
997 __I915_PMU_ENGINE(engine->uabi_class, in create_event_attributes()
998 engine->uabi_instance, in create_event_attributes()
1001 str = kasprintf(GFP_KERNEL, "%s-%s.unit", in create_event_attributes()
1002 engine->name, engine_events[i].name); in create_event_attributes()
1006 *attr_iter++ = &pmu_iter->attr.attr; in create_event_attributes()
1011 pmu->i915_attr = i915_attr; in create_event_attributes()
1012 pmu->pmu_attr = pmu_attr; in create_event_attributes()
1018 kfree((*attr_iter)->name); in create_event_attributes()
1030 struct attribute **attr_iter = pmu->events_attr_group.attrs; in free_event_attributes()
1033 kfree((*attr_iter)->name); in free_event_attributes()
1035 kfree(pmu->events_attr_group.attrs); in free_event_attributes()
1036 kfree(pmu->i915_attr); in free_event_attributes()
1037 kfree(pmu->pmu_attr); in free_event_attributes()
1039 pmu->events_attr_group.attrs = NULL; in free_event_attributes()
1040 pmu->i915_attr = NULL; in free_event_attributes()
1041 pmu->pmu_attr = NULL; in free_event_attributes()
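The create_event_attributes()/free_event_attributes() pair above publishes each counter as a named alias in the "events" attribute group. A small, hypothetical sketch (again not part of the driver) that lists those aliases through the standard perf event_source sysfs layout, assuming the default "i915" PMU name:

	/* Hypothetical example: enumerate the published event aliases. */
	#include <stdio.h>
	#include <dirent.h>

	int main(void)
	{
		const char *path = "/sys/bus/event_source/devices/i915/events";
		struct dirent *de;
		DIR *d = opendir(path);

		if (!d) {
			perror(path);
			return 1;
		}

		while ((de = readdir(d)) != NULL) {
			if (de->d_name[0] == '.')
				continue;
			/* e.g. rc6-residency, actual-frequency, rcs0-busy, plus *.unit */
			printf("%s\n", de->d_name);
		}

		closedir(d);
		return 0;
	}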
1048 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_online()
1062 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_offline()
1065 * Unregistering an instance generates a CPU offline event which we must in i915_pmu_cpu_offline()
1068 if (pmu->closed) in i915_pmu_cpu_offline()
1081 if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) { in i915_pmu_cpu_offline()
1082 perf_pmu_migrate_context(&pmu->base, cpu, target); in i915_pmu_cpu_offline()
1083 pmu->cpuhp.cpu = target; in i915_pmu_cpu_offline()
1117 return -EINVAL; in i915_pmu_register_cpuhp_state()
1119 return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node); in i915_pmu_register_cpuhp_state()
1124 cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); in i915_pmu_unregister_cpuhp_state()
1129 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); in is_igp()
1132 return pci_domain_nr(pdev->bus) == 0 && in is_igp()
1133 pdev->bus->number == 0 && in is_igp()
1134 PCI_SLOT(pdev->devfn) == 2 && in is_igp()
1135 PCI_FUNC(pdev->devfn) == 0; in is_igp()
1140 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_register()
1143 &pmu->events_attr_group, in i915_pmu_register()
1148 int ret = -ENOMEM; in i915_pmu_register()
1151 drm_info(&i915->drm, "PMU not supported for this GPU."); in i915_pmu_register()
1155 spin_lock_init(&pmu->lock); in i915_pmu_register()
1156 hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in i915_pmu_register()
1157 pmu->timer.function = i915_sample; in i915_pmu_register()
1158 pmu->cpuhp.cpu = -1; in i915_pmu_register()
1162 pmu->name = kasprintf(GFP_KERNEL, in i915_pmu_register()
1164 dev_name(i915->drm.dev)); in i915_pmu_register()
1165 if (pmu->name) { in i915_pmu_register()
1167 strreplace((char *)pmu->name, ':', '_'); in i915_pmu_register()
1170 pmu->name = "i915"; in i915_pmu_register()
1172 if (!pmu->name) in i915_pmu_register()
1175 pmu->events_attr_group.name = "events"; in i915_pmu_register()
1176 pmu->events_attr_group.attrs = create_event_attributes(pmu); in i915_pmu_register()
1177 if (!pmu->events_attr_group.attrs) in i915_pmu_register()
1180 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), in i915_pmu_register()
1182 if (!pmu->base.attr_groups) in i915_pmu_register()
1185 pmu->base.module = THIS_MODULE; in i915_pmu_register()
1186 pmu->base.task_ctx_nr = perf_invalid_context; in i915_pmu_register()
1187 pmu->base.event_init = i915_pmu_event_init; in i915_pmu_register()
1188 pmu->base.add = i915_pmu_event_add; in i915_pmu_register()
1189 pmu->base.del = i915_pmu_event_del; in i915_pmu_register()
1190 pmu->base.start = i915_pmu_event_start; in i915_pmu_register()
1191 pmu->base.stop = i915_pmu_event_stop; in i915_pmu_register()
1192 pmu->base.read = i915_pmu_event_read; in i915_pmu_register()
1193 pmu->base.event_idx = i915_pmu_event_event_idx; in i915_pmu_register()
1195 ret = perf_pmu_register(&pmu->base, pmu->name, -1); in i915_pmu_register()
1206 perf_pmu_unregister(&pmu->base); in i915_pmu_register()
1208 kfree(pmu->base.attr_groups); in i915_pmu_register()
1210 pmu->base.event_init = NULL; in i915_pmu_register()
1214 kfree(pmu->name); in i915_pmu_register()
1216 drm_notice(&i915->drm, "Failed to register PMU!\n"); in i915_pmu_register()
1221 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_unregister()
1223 if (!pmu->base.event_init) in i915_pmu_unregister()
1227 * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu in i915_pmu_unregister()
1231 pmu->closed = true; in i915_pmu_unregister()
1234 hrtimer_cancel(&pmu->timer); in i915_pmu_unregister()
1238 perf_pmu_unregister(&pmu->base); in i915_pmu_unregister()
1239 pmu->base.event_init = NULL; in i915_pmu_unregister()
1240 kfree(pmu->base.attr_groups); in i915_pmu_unregister()
1242 kfree(pmu->name); in i915_pmu_unregister()