Lines matching "pmu" (full-word identifier search), grouped by function:
In pmu_needs_timer() (pmu is a function argument):
      80  static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
      82      struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
      90      enable = pmu->enable;
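The matches show only the entry of pmu_needs_timer(); a minimal sketch of the decision it appears to make, reconstructed around the enable bitmask read at line 90 (FREQUENCY_SAMPLE_MASK is an assumed name; the exact set of timer-driven events is not visible above):

    /* Sketch: decide whether the sampling hrtimer has to run. Only
     * software-sampled counters need it, and engine counters cannot
     * change while the GPU is parked. */
    static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
    {
            u64 enable = pmu->enable;       /* bitmask of enabled events */

            /* Keep only events sampled from the timer (frequency
             * counters and the per-engine sample group). */
            enable &= FREQUENCY_SAMPLE_MASK | ENGINE_SAMPLE_MASK;

            /* An idle GPU cannot accumulate busy, wait or semaphore
             * time, so engine bits drop out when parked. */
            if (!gpu_active)
                    enable &= ~ENGINE_SAMPLE_MASK;

            /* Any bit left means the timer must keep firing. */
            return enable;
    }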
In i915_pmu_gt_parked() (pmu is a local variable):
     121      struct i915_pmu *pmu = &i915->pmu;
     123      if (!pmu->base.event_init)
     126      spin_lock_irq(&pmu->lock);
     131      pmu->timer_enabled = pmu_needs_timer(pmu, false);
     132      spin_unlock_irq(&pmu->lock);
In __i915_pmu_maybe_start_timer() (pmu is a function argument):
     135  static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
     137      if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
     138              pmu->timer_enabled = true;
     139              pmu->timer_last = ktime_get();
     140              hrtimer_start_range_ns(&pmu->timer,
In i915_pmu_gt_unparked() (pmu is a local variable):
     148      struct i915_pmu *pmu = &i915->pmu;
     150      if (!pmu->base.event_init)
     153      spin_lock_irq(&pmu->lock);
     157      __i915_pmu_maybe_start_timer(pmu);
     158      spin_unlock_irq(&pmu->lock);
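Parked/unparked bracket the GPU idle transitions: parking re-evaluates pmu_needs_timer(pmu, false) so the timer lapses, and unparking restarts it if any enabled event needs it. The call truncated at line 140 plausibly completes along these lines (PERIOD, the zero slack and the mode flag are assumptions; only the first argument is visible above):

    /* Sketch: arm the sampling timer relative to now. timer_last is
     * recorded first so the callback can measure the true elapsed
     * period instead of trusting the nominal one. */
    pmu->timer_last = ktime_get();
    hrtimer_start_range_ns(&pmu->timer,
                           ns_to_ktime(PERIOD),  /* assumed interval */
                           0,                    /* no slack */
                           HRTIMER_MODE_REL);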
In engines_sample() (pmu is a local variable, aliasing &engine->pmu):
     174      if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
     178              struct intel_engine_pmu *pmu = &engine->pmu;
     193              add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
     195              add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
     210              add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
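add_sample() itself is not among the matches; assuming the sample counters simply accumulate elapsed nanoseconds while the sampled condition holds, it would be a one-liner like:

    /* Assumed helper: credit period_ns to a counter whose condition
     * (busy, waiting, or blocked on a semaphore) was observed true
     * during this sampling period. */
    static void add_sample(struct i915_pmu_sample *sample, u32 val)
    {
            sample->cur += val;
    }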
In frequency_sample() (pmu is a local variable):
     229      struct i915_pmu *pmu = &i915->pmu;
     231      if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
     241              add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
     246      if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
     247              add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
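Frequencies cannot be summed directly across periods of different length, so the multiply-accumulate variant presumably weights each reading by the period it covered; a sketch under that assumption:

    /* Assumed helper: accumulate frequency x time, so that a later
     * division by total sampled time yields the average frequency. */
    static void add_sample_mult(struct i915_pmu_sample *sample,
                                u32 val, u32 mul)
    {
            sample->cur += mul_u32_u32(val, mul);   /* 64-bit product */
    }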
In i915_sample() (pmu is a local variable):
     256              container_of(hrtimer, struct drm_i915_private, pmu.timer);
     257      struct i915_pmu *pmu = &i915->pmu;
     262      if (!READ_ONCE(pmu->timer_enabled))
     266      period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
     267      pmu->timer_last = now;
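Together these lines imply the usual self-rearming hrtimer shape; a sketch with the return values and forwarding filled in (those parts, PERIOD, and the samplers' exact signatures are assumptions):

    /* Sketch of the timer callback: measure the real elapsed period,
     * feed it to the samplers, then re-arm unless sampling was turned
     * off under pmu->lock. */
    static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
    {
            struct drm_i915_private *i915 =
                    container_of(hrtimer, struct drm_i915_private, pmu.timer);
            struct i915_pmu *pmu = &i915->pmu;
            ktime_t now = ktime_get();
            unsigned int period_ns;

            if (!READ_ONCE(pmu->timer_enabled))
                    return HRTIMER_NORESTART;

            /* Use the measured delta, not the nominal period, so late
             * timer expiry does not skew the time-based counters. */
            period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
            pmu->timer_last = now;

            engines_sample(i915, period_ns);
            frequency_sample(i915, period_ns);

            hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
            return HRTIMER_RESTART;
    }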
In engine_event_destroy():
     302              container_of(event->pmu, typeof(*i915), pmu.base);

In engine_event_init():
     372              container_of(event->pmu, typeof(*i915), pmu.base);

In i915_pmu_event_init():
     396              container_of(event->pmu, typeof(*i915), pmu.base);
     399      if (event->attr.type != event->pmu->type)
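The repeated container_of() line is the standard idiom for getting from the struct pmu that perf hands to a callback back to the driver structure embedding it; a sketch of how event_init likely uses it (the validation body is elided, only the checks visible above are kept):

    /* Sketch: recover the i915 device from the embedded pmu.base and
     * reject events that were not created against this PMU's type. */
    static int i915_pmu_event_init(struct perf_event *event)
    {
            struct drm_i915_private *i915 =
                    container_of(event->pmu, typeof(*i915), pmu.base);

            if (event->attr.type != event->pmu->type)
                    return -ENOENT;

            /* ... validate event->attr.config against known counters ... */
            return 0;
    }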
In get_rc6() (pmu is a local variable):
     453      struct i915_pmu *pmu = &i915->pmu;
     469      spin_lock_irqsave(&pmu->lock, flags);
     471      if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
     472              pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
     473              pmu->sample[__I915_SAMPLE_RC6].cur = val;
     475              val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
     478      spin_unlock_irqrestore(&pmu->lock, flags);
     489      spin_lock_irqsave(&pmu->lock, flags);
     505      if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
     506              pmu->suspended_time_last = val;
     508      val -= pmu->suspended_time_last;
     509      val += pmu->sample[__I915_SAMPLE_RC6].cur;
     511      pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
     512      } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
     513              val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
     515      val = pmu->sample[__I915_SAMPLE_RC6].cur;
     518      spin_unlock_irqrestore(&pmu->lock, flags);
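The two locked regions implement RC6 residency estimation across runtime suspend: while the device sleeps the RC6 register cannot be read, so time spent suspended is credited as RC6 time on top of the last real reading, and real readings below a previously published estimate are clamped so the counter stays monotonic. A condensed sketch; is_awake(), is_suspended(), read_rc6_residency() and suspended_time_ns() are hypothetical stand-ins for the driver's runtime-PM and register plumbing, which the matches do not show:

    /* Sketch of get_rc6()'s two paths, simplified. */
    static u64 get_rc6(struct drm_i915_private *i915)
    {
            struct i915_pmu *pmu = &i915->pmu;
            unsigned long flags;
            u64 val;

            if (is_awake(i915)) {
                    val = read_rc6_residency(i915);   /* real hardware value */

                    spin_lock_irqsave(&pmu->lock, flags);
                    if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
                            pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
                            pmu->sample[__I915_SAMPLE_RC6].cur = val; /* baseline */
                    } else {
                            /* Never report less than a published estimate. */
                            val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
                    }
                    spin_unlock_irqrestore(&pmu->lock, flags);
            } else {
                    spin_lock_irqsave(&pmu->lock, flags);
                    if (is_suspended(i915)) {         /* re-checked under lock */
                            val = suspended_time_ns(i915);
                            if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
                                    pmu->suspended_time_last = val;
                            val -= pmu->suspended_time_last; /* asleep since then */
                            val += pmu->sample[__I915_SAMPLE_RC6].cur;
                            pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
                    } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
                            val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
                    } else {
                            val = pmu->sample[__I915_SAMPLE_RC6].cur;
                    }
                    spin_unlock_irqrestore(&pmu->lock, flags);
            }

            return val;
    }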
In __i915_pmu_event_read() (pmu is a local variable; lines 554 and 559 are continuations of assignments whose targets fall on unmatched lines):
     530              container_of(event->pmu, typeof(*i915), pmu.base);
     531      struct i915_pmu *pmu = &i915->pmu;
     548              val = engine->pmu.sample[sample].cur;
     554              div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
     559              div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
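The read path dispatches on the event type: engine events return their accumulated sample directly, while the frequency events scale the accumulated frequency x time product back into counter units. The divisor is truncated out of the matches, so it appears as a placeholder here:

    /* Sketch of the dispatch. FREQ_DIV is a placeholder for the real,
     * not-shown divisor, and the engine/sample decoding is elided. */
    if (is_engine_event(event)) {
            val = engine->pmu.sample[sample].cur;
    } else if (event->attr.config == I915_PMU_ACTUAL_FREQUENCY) {
            val = div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, FREQ_DIV);
    } else if (event->attr.config == I915_PMU_REQUESTED_FREQUENCY) {
            val = div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, FREQ_DIV);
    }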
In i915_pmu_enable() (pmu is a local variable):
     592              container_of(event->pmu, typeof(*i915), pmu.base);
     594      struct i915_pmu *pmu = &i915->pmu;
     597      spin_lock_irqsave(&pmu->lock, flags);
     603      BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
     604      GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
     605      GEM_BUG_ON(pmu->enable_count[bit] == ~0);
     606      pmu->enable |= BIT_ULL(bit);
     607      pmu->enable_count[bit]++;
     612      __i915_pmu_maybe_start_timer(pmu);
     626      BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
     628      BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
     630      GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
     631      GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
     632      GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
     634      engine->pmu.enable |= BIT(sample);
     635      engine->pmu.enable_count[sample]++;
     638      spin_unlock_irqrestore(&pmu->lock, flags);
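Enable is reference counted at two levels, globally per counter bit and per engine sample slot, so several perf events can share one underlying counter. The BUILD_BUG_ON/GEM_BUG_ON lines are compile-time and debug-build guards (array sizing, bit range, refcount overflow), not runtime logic; the core of the path is small:

    /* Sketch: take a reference on the counter. Setting the bit is
     * idempotent for later users; only the first arming matters. */
    spin_lock_irqsave(&pmu->lock, flags);
    pmu->enable |= BIT_ULL(bit);          /* mark the counter live */
    pmu->enable_count[bit]++;             /* one reference per open event */
    __i915_pmu_maybe_start_timer(pmu);    /* no-op if already running */
    spin_unlock_irqrestore(&pmu->lock, flags);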
In i915_pmu_disable() (pmu is a local variable):
     651              container_of(event->pmu, typeof(*i915), pmu.base);
     653      struct i915_pmu *pmu = &i915->pmu;
     656      spin_lock_irqsave(&pmu->lock, flags);
     666      GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
     667      GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
     668      GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
     674      if (--engine->pmu.enable_count[sample] == 0)
     675              engine->pmu.enable &= ~BIT(sample);
     678      GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
     679      GEM_BUG_ON(pmu->enable_count[bit] == 0);
     684      if (--pmu->enable_count[bit] == 0) {
     685              pmu->enable &= ~BIT_ULL(bit);
     686              pmu->timer_enabled &= pmu_needs_timer(pmu, true);
     689      spin_unlock_irqrestore(&pmu->lock, flags);
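Disable mirrors enable: only the last reference clears a bit, and the `&=` at line 686 is deliberate; it can stop a timer that is no longer needed but can never restart one that was already stopped:

    /* Sketch: drop a reference; the 1 -> 0 transition clears the bit
     * and re-evaluates whether the sampling timer must keep running. */
    spin_lock_irqsave(&pmu->lock, flags);
    if (--pmu->enable_count[bit] == 0) {
            pmu->enable &= ~BIT_ULL(bit);
            pmu->timer_enabled &= pmu_needs_timer(pmu, true);
    }
    spin_unlock_irqrestore(&pmu->lock, flags);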
In create_event_attributes() (pmu is a function argument):
     838  create_event_attributes(struct i915_pmu *pmu)
     840      struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
     953      pmu->i915_attr = i915_attr;
     954      pmu->pmu_attr = pmu_attr;
In free_event_attributes() (pmu is a function argument):
     970  static void free_event_attributes(struct i915_pmu *pmu)
     978      kfree(pmu->i915_attr);
     979      kfree(pmu->pmu_attr);
     982      pmu->i915_attr = NULL;
     983      pmu->pmu_attr = NULL;
In i915_pmu_cpu_online() (pmu is a local variable):
     988      struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
     990      GEM_BUG_ON(!pmu->base.event_init);

In i915_pmu_cpu_offline() (pmu is a local variable):
    1001      struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
    1004      GEM_BUG_ON(!pmu->base.event_init);
    1011      perf_pmu_migrate_context(&pmu->base, cpu, target);
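As a system-wide (uncore) PMU, i915 binds all events to a single CPU; when that CPU goes offline, its perf contexts must move to a survivor. A sketch of the offline handler with the cpumask bookkeeping filled in (the i915_pmu_cpumask variable and the target selection are assumptions; only the migrate call is visible above):

    /* Sketch: if the departing CPU was the designated events CPU, pick
     * a replacement and migrate all perf contexts over to it. */
    static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
    {
            struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
            unsigned int target;

            GEM_BUG_ON(!pmu->base.event_init);

            if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
                    target = cpumask_any_but(cpu_online_mask, cpu);
                    if (target < nr_cpu_ids) {   /* a valid target exists */
                            cpumask_set_cpu(target, &i915_pmu_cpumask);
                            perf_pmu_migrate_context(&pmu->base, cpu, target);
                    }
            }

            return 0;
    }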
In i915_pmu_register_cpuhp_state() (pmu is a function argument):
    1020  static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
    1033      ret = cpuhp_state_add_instance(slot, &pmu->node);

In i915_pmu_unregister_cpuhp_state() (pmu is a function argument):
    1043  static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
    1046      WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
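The `slot` being added to is presumably a dynamically allocated multi-instance hotplug state, set up once with the online/offline callbacks above; each PMU instance then registers its own hlist node against it. A sketch of that setup (the state name string is an assumption):

    /* Sketch: allocate a dynamic multi-instance cpuhp state, then hook
     * this PMU instance into it; the callbacks receive the instance's
     * hlist node on every CPU transition. */
    enum cpuhp_state slot;
    int ret;

    ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                  "perf/i915:online",   /* assumed name */
                                  i915_pmu_cpu_online,
                                  i915_pmu_cpu_offline);
    if (ret < 0)
            return ret;

    slot = ret;   /* dynamic states return the allocated slot number */
    ret = cpuhp_state_add_instance(slot, &pmu->node);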
In i915_pmu_register() (pmu is a local variable):
    1052      struct i915_pmu *pmu = &i915->pmu;
    1056              dev_info(i915->drm.dev, "PMU not supported for this GPU.");
    1060      i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
    1066      pmu->base.attr_groups = i915_pmu_attr_groups;
    1067      pmu->base.task_ctx_nr = perf_invalid_context;
    1068      pmu->base.event_init = i915_pmu_event_init;
    1069      pmu->base.add = i915_pmu_event_add;
    1070      pmu->base.del = i915_pmu_event_del;
    1071      pmu->base.start = i915_pmu_event_start;
    1072      pmu->base.stop = i915_pmu_event_stop;
    1073      pmu->base.read = i915_pmu_event_read;
    1074      pmu->base.event_idx = i915_pmu_event_event_idx;
    1076      spin_lock_init(&pmu->lock);
    1077      hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    1078      pmu->timer.function = i915_sample;
    1080      ret = perf_pmu_register(&pmu->base, "i915", -1);
    1084      ret = i915_pmu_register_cpuhp_state(pmu);
    1091      perf_pmu_unregister(&pmu->base);
    1093      pmu->base.event_init = NULL;
    1094      free_event_attributes(pmu);
    1095      DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
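Lines 1080-1095 imply the usual two-step registration with staged unwinding; note that pmu->base.event_init doubles as the "is registered" flag tested by the park/unpark hooks and by unregister. A sketch of the tail with the error labels made explicit (label names are assumptions):

    /* Sketch: register with perf, then with cpuhp; each failure undoes
     * exactly the steps that succeeded before it. */
    ret = perf_pmu_register(&pmu->base, "i915", -1);
    if (ret)
            goto err;

    ret = i915_pmu_register_cpuhp_state(pmu);
    if (ret)
            goto err_unreg;

    return;

    err_unreg:
            perf_pmu_unregister(&pmu->base);
    err:
            pmu->base.event_init = NULL;   /* mark as not registered */
            free_event_attributes(pmu);
            DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);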
In i915_pmu_unregister() (pmu is a local variable):
    1100      struct i915_pmu *pmu = &i915->pmu;
    1102      if (!pmu->base.event_init)
    1105      WARN_ON(pmu->enable);
    1107      hrtimer_cancel(&pmu->timer);
    1109      i915_pmu_unregister_cpuhp_state(pmu);
    1111      perf_pmu_unregister(&pmu->base);
    1112      pmu->base.event_init = NULL;
    1113      free_event_attributes(pmu);