Lines Matching full:pmu

40 #define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
61 { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
62 { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
68 struct pmu pmu; member
84 struct ddr_pmu *pmu = dev_get_drvdata(dev); in ddr_perf_cpumask_show() local
86 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); in ddr_perf_cpumask_show()
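The two matches at lines 84-86 come from the PMU's sysfs "cpumask" attribute, which reports the single CPU this uncore PMU is bound to. Reassembled into a complete function (a sketch: only the two matched lines are taken from the source, the DEVICE_ATTR wiring is assumed), it looks roughly like:

static ssize_t ddr_perf_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	/* the ddr_pmu instance was attached to the device at probe time */
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	/* print the one CPU the PMU is currently bound to */
	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static DEVICE_ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);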
181 static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event) in ddr_perf_alloc_counter() argument
191 if (pmu->events[EVENT_CYCLES_COUNTER] == NULL) in ddr_perf_alloc_counter()
198 if (pmu->events[i] == NULL) in ddr_perf_alloc_counter()
205 static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter) in ddr_perf_free_counter() argument
207 pmu->events[counter] = NULL; in ddr_perf_free_counter()
210 static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) in ddr_perf_read_counter() argument
212 return readl_relaxed(pmu->base + COUNTER_READ + counter * 4); in ddr_perf_read_counter()
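Lines 181-212 are the counter bookkeeping helpers: the cycles event gets a dedicated counter slot, every other event takes the first free programmable counter, freeing a counter just clears its events[] slot, and reads go through a per-counter register at a fixed stride. A minimal sketch of how they fit together, assuming EVENT_CYCLES_ID, EVENT_CYCLES_COUNTER and NUM_COUNTERS are defined elsewhere in the file (only the NULL checks and the readl_relaxed() come from the matches above):

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/* the cycles event is pinned to its dedicated counter */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
	}

	/* any other event takes the first free programmable counter */
	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	/* each counter exposes a 32-bit value at COUNTER_READ + 4 * index */
	return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
}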
237 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_init() local
241 if (event->attr.type != event->pmu->type) in ddr_perf_event_init()
248 dev_warn(pmu->dev, "Can't provide per-task data!\n"); in ddr_perf_event_init()
257 if (event->group_leader->pmu != event->pmu && in ddr_perf_event_init()
261 if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { in ddr_perf_event_init()
271 if (sibling->pmu != event->pmu && in ddr_perf_event_init()
276 event->cpu = pmu->cpu; in ddr_perf_event_init()
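The matches from ddr_perf_event_init() (lines 237-276) outline the usual validation path for an uncore PMU: reject events of other PMU types, reject sampling and per-task use, require that a group contains only this PMU's events or software events, check AXI ID filter compatibility across the group when the DDR_CAP_AXI_ID_FILTER quirk is set, and finally bind the event to the PMU's nominated CPU. A condensed sketch of that flow; ddr_perf_filters_compatible() and the hw.idx initialisation are assumed from context rather than taken from the matches:

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* no sampling and no per-task counting on an uncore PMU */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/* groups may only mix this PMU's events with software events */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		/* all filtered events in a group must share the filter value */
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	/* uncore events always run on the CPU the PMU is bound to */
	event->cpu = pmu->cpu;
	event->hw.idx = -1;

	return 0;
}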
285 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_update() local
292 new_raw_count = ddr_perf_read_counter(pmu, counter); in ddr_perf_event_update()
301 static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, in ddr_perf_counter_enable() argument
313 writel(0, pmu->base + reg); in ddr_perf_counter_enable()
316 writel(val, pmu->base + reg); in ddr_perf_counter_enable()
319 writel(0, pmu->base + reg); in ddr_perf_counter_enable()
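Lines 301-319 are ddr_perf_counter_enable(): each counter has its own control register at a fixed stride from COUNTER_CNTL, and enabling deliberately writes 0 before the enable value because a cycle counter that is re-enabled without being cleared does not restart. A sketch of the whole helper; the CNTL_* bit names are assumptions (only the signature and the three writel() calls come from the matches above):

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;	/* per-counter control register */
	int val;

	if (enable) {
		/*
		 * Disable first, then enable with the clear bit set;
		 * otherwise the cycle counter will not restart if it
		 * was already enabled.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);	/* event select */
		writel(val, pmu->base + reg);
	} else {
		/* disable the counter */
		writel(0, pmu->base + reg);
	}
}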
325 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_start() local
331 ddr_perf_counter_enable(pmu, event->attr.config, counter, true); in ddr_perf_event_start()
338 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_add() local
344 if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { in ddr_perf_event_add()
348 if (pmu->events[i] && in ddr_perf_event_add()
349 !ddr_perf_filters_compatible(event, pmu->events[i])) in ddr_perf_event_add()
356 writel(cfg1, pmu->base + COUNTER_DPCR1); in ddr_perf_event_add()
360 counter = ddr_perf_alloc_counter(pmu, cfg); in ddr_perf_event_add()
362 dev_dbg(pmu->dev, "There are not enough counters\n"); in ddr_perf_event_add()
366 pmu->events[counter] = event; in ddr_perf_event_add()
367 pmu->active_events++; in ddr_perf_event_add()
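The matches from ddr_perf_event_add() (lines 338-367) show the add path: with the AXI ID filter quirk, the new event's filter must be compatible with every event already occupying a counter before the shared COUNTER_DPCR1 register is written; then a counter is allocated, the event is stored in pmu->events[] and the active count is bumped. A sketch along those lines; ddr_perf_is_filtered() is assumed from context and the exact massaging of the config1 filter value is elided:

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;
	int counter;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		/* the AXI ID filter is shared, so all filtered events must agree */
		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event))
			/* AXI ID and AXI masking share this one register */
			writel(cfg1, pmu->base + COUNTER_DPCR1);
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;
	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}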
380 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_stop() local
384 ddr_perf_counter_enable(pmu, event->attr.config, counter, false); in ddr_perf_event_stop()
392 struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); in ddr_perf_event_del() local
398 ddr_perf_free_counter(pmu, counter); in ddr_perf_event_del()
399 pmu->active_events--; in ddr_perf_event_del()
403 static void ddr_perf_pmu_enable(struct pmu *pmu) in ddr_perf_pmu_enable() argument
405 struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu); in ddr_perf_pmu_enable()
415 static void ddr_perf_pmu_disable(struct pmu *pmu) in ddr_perf_pmu_disable() argument
417 struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu); in ddr_perf_pmu_disable()
426 static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, in ddr_perf_init() argument
429 *pmu = (struct ddr_pmu) { in ddr_perf_init()
430 .pmu = (struct pmu) { in ddr_perf_init()
447 pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL); in ddr_perf_init()
448 return pmu->id; in ddr_perf_init()
454 struct ddr_pmu *pmu = (struct ddr_pmu *) p; in ddr_perf_irq_handler() local
458 ddr_perf_counter_enable(pmu, in ddr_perf_irq_handler()
474 if (!pmu->events[i]) in ddr_perf_irq_handler()
477 event = pmu->events[i]; in ddr_perf_irq_handler()
485 ddr_perf_counter_enable(pmu, in ddr_perf_irq_handler()
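The interrupt fires when the cycle counter overflows. Lines 454-485 show the handler's pattern: stop the cycle counter (which freezes every counter on this hardware), fold the frozen values of all active events into perf, then re-enable the cycle counter so counting resumes from a cleared state. A sketch of the handler, assuming EVENT_CYCLES_ID and EVENT_CYCLES_COUNTER as in the rest of the file:

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event;
	int i;

	/* disabling the cycle counter stops all counters on this PMU */
	ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER, false);

	/* fold the frozen counter values into the perf event counts */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];
		ddr_perf_event_update(event);
	}

	/* restart the cycle counter, and with it the whole PMU */
	ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER, true);

	return IRQ_HANDLED;
}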
497 struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node); in ddr_perf_offline_cpu() local
500 if (cpu != pmu->cpu) in ddr_perf_offline_cpu()
507 perf_pmu_migrate_context(&pmu->pmu, cpu, target); in ddr_perf_offline_cpu()
508 pmu->cpu = target; in ddr_perf_offline_cpu()
510 WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu))); in ddr_perf_offline_cpu()
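Lines 497-510 are the CPU hotplug callback. Since this uncore PMU is bound to a single CPU, taking that CPU offline means choosing another online CPU, migrating the perf context there, and re-pointing the interrupt affinity hint. A sketch; the target selection via cpumask_any_but() is assumed, the rest follows the matched lines:

static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	/* only act when the CPU going down is the one this PMU lives on */
	if (cpu != pmu->cpu)
		return 0;

	/* pick any other online CPU to take over */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	/* keep the overflow interrupt on the same CPU as the events */
	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}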
517 struct ddr_pmu *pmu; in ddr_perf_probe() local
531 pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); in ddr_perf_probe()
532 if (!pmu) in ddr_perf_probe()
535 num = ddr_perf_init(pmu, base, &pdev->dev); in ddr_perf_probe()
537 platform_set_drvdata(pdev, pmu); in ddr_perf_probe()
544 pmu->devtype_data = of_device_get_match_data(&pdev->dev); in ddr_perf_probe()
546 pmu->cpu = raw_smp_processor_id(); in ddr_perf_probe()
557 pmu->cpuhp_state = ret; in ddr_perf_probe()
559 /* Register the pmu instance for cpu hotplug */ in ddr_perf_probe()
560 cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); in ddr_perf_probe()
574 pmu); in ddr_perf_probe()
580 pmu->irq = irq; in ddr_perf_probe()
581 ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)); in ddr_perf_probe()
583 dev_err(pmu->dev, "Failed to set interrupt affinity!\n"); in ddr_perf_probe()
587 ret = perf_pmu_register(&pmu->pmu, name, -1); in ddr_perf_probe()
594 if (pmu->cpuhp_state) in ddr_perf_probe()
595 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); in ddr_perf_probe()
597 ida_simple_remove(&ddr_ida, pmu->id); in ddr_perf_probe()
598 dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); in ddr_perf_probe()
604 struct ddr_pmu *pmu = platform_get_drvdata(pdev); in ddr_perf_remove() local
606 cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); in ddr_perf_remove()
607 irq_set_affinity_hint(pmu->irq, NULL); in ddr_perf_remove()
609 perf_pmu_unregister(&pmu->pmu); in ddr_perf_remove()
611 ida_simple_remove(&ddr_ida, pmu->id); in ddr_perf_remove()
617 .name = "imx-ddr-pmu",