Lines Matching +full:cluster +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
26 #include <soc/qcom/kryo-l2-accessors.h>
121 * The cache is made up of one or more clusters, each cluster has its own PMU.
122 * Each cluster is associated with one or more CPUs.
125 * Events can be envisioned as a 2-dimensional array. Each column represents a group of events; only one event from a group can be counted at a time.
143 /* The CPU that is used for collecting events on this cluster */
145 /* All the CPUs associated with this cluster */
166 return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu); in get_cluster_pmu()
243 static void cluster_pmu_set_resr(struct cluster_pmu *cluster, in cluster_pmu_set_resr() argument
254 spin_lock_irqsave(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
262 spin_unlock_irqrestore(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
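
Between the lock and unlock shown above, cluster_pmu_set_resr() rewrites one group's field in the shared event-selection register, so the whole read-modify-write has to be serialized against the interrupt handler. A minimal kernel-style sketch of that pattern, assuming an 8-bit field per group and placeholder register accessors (not the driver's real layout):

#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Sketch only: several counters share one event-selection register, so
 * changing the event code for one group must not clobber a concurrent
 * update to another group's field. Field width and the reg_read()/
 * reg_write() accessors are placeholders.
 */
static void set_evtsel_field(spinlock_t *lock, u32 group, u32 code,
                             u64 (*reg_read)(void), void (*reg_write)(u64))
{
        unsigned long flags;
        u32 shift = group * 8;          /* assumed 8 bits per group */
        u64 val;

        spin_lock_irqsave(lock, flags);
        val = reg_read();
        val &= ~((u64)0xff << shift);   /* clear this group's field */
        val |= (u64)code << shift;      /* install the new event code */
        reg_write(val);
        spin_unlock_irqrestore(lock, flags);
}
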
268 * all CPUs, subunits and ID independent events in this cluster.
299 struct hw_perf_event *hwc = &event->hw; in l2_cache_event_update()
301 u32 idx = hwc->idx; in l2_cache_event_update()
304 prev = local64_read(&hwc->prev_count); in l2_cache_event_update()
306 } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); in l2_cache_event_update()
309 * The cycle counter is 64-bit, but all other counters are in l2_cache_event_update()
310 * 32-bit, and we must handle 32-bit overflow explicitly. in l2_cache_event_update()
312 delta = now - prev; in l2_cache_event_update()
316 local64_add(delta, &event->count); in l2_cache_event_update()
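
The comment at lines 309-310 is the key to the delta computation above: with unsigned arithmetic, now - prev is already correct across a 32-bit wrap as long as the result is truncated back to 32 bits for the non-cycle counters. A self-contained illustration (helper name is mine, not the driver's):

#include <stdint.h>

/*
 * Illustration: events elapsed between two counter snapshots. A 32-bit
 * counter that wrapped (e.g. prev = 0xfffffff0, now = 0x10) still gives
 * the right answer (0x20) once the delta is masked back to 32 bits; the
 * 64-bit cycle counter needs no masking.
 */
static uint64_t counter_delta(uint64_t prev, uint64_t now, int is_cycle_ctr)
{
        uint64_t delta = now - prev;            /* well-defined modulo 2^64 */

        if (!is_cycle_ctr)
                delta &= 0xffffffffULL;         /* drop the borrow above bit 31 */

        return delta;
}
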
319 static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, in l2_cache_cluster_set_period() argument
322 u32 idx = hwc->idx; in l2_cache_cluster_set_period()
335 local64_set(&hwc->prev_count, new); in l2_cache_cluster_set_period()
339 static int l2_cache_get_event_idx(struct cluster_pmu *cluster, in l2_cache_get_event_idx() argument
342 struct hw_perf_event *hwc = &event->hw; in l2_cache_get_event_idx()
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
347 if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { in l2_cache_get_event_idx()
348 if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) in l2_cache_get_event_idx()
349 return -EAGAIN; in l2_cache_get_event_idx()
354 idx = find_first_zero_bit(cluster->used_counters, num_ctrs); in l2_cache_get_event_idx()
357 return -EAGAIN; in l2_cache_get_event_idx()
364 group = L2_EVT_GROUP(hwc->config_base); in l2_cache_get_event_idx()
365 if (test_bit(group, cluster->used_groups)) in l2_cache_get_event_idx()
366 return -EAGAIN; in l2_cache_get_event_idx()
368 set_bit(idx, cluster->used_counters); in l2_cache_get_event_idx()
369 set_bit(group, cluster->used_groups); in l2_cache_get_event_idx()
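
The allocator above combines two constraints: a free physical counter (used_counters) and the column-exclusion rule that only one event per group may be scheduled at once (used_groups). A freestanding sketch of that bookkeeping, with made-up sizes and names rather than the driver's:

#include <stdint.h>

#define NUM_COUNTERS    8       /* hypothetical general-purpose counters */

struct alloc_state {
        uint32_t used_counters; /* bit i set => counter i busy */
        uint32_t used_groups;   /* bit g set => group g already counted */
};

/* Returns a counter index, or -1 if no counter is free or the group is taken. */
static int get_event_idx(struct alloc_state *s, unsigned int group)
{
        int idx;

        if (s->used_groups & (1u << group))
                return -1;              /* column exclusion: group in use */

        for (idx = 0; idx < NUM_COUNTERS; idx++)
                if (!(s->used_counters & (1u << idx)))
                        break;
        if (idx == NUM_COUNTERS)
                return -1;              /* all counters busy */

        s->used_counters |= 1u << idx;
        s->used_groups |= 1u << group;
        return idx;
}
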
374 static void l2_cache_clear_event_idx(struct cluster_pmu *cluster, in l2_cache_clear_event_idx() argument
377 struct hw_perf_event *hwc = &event->hw; in l2_cache_clear_event_idx()
378 int idx = hwc->idx; in l2_cache_clear_event_idx()
380 clear_bit(idx, cluster->used_counters); in l2_cache_clear_event_idx()
381 if (hwc->config_base != L2CYCLE_CTR_RAW_CODE) in l2_cache_clear_event_idx()
382 clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups); in l2_cache_clear_event_idx()
387 struct cluster_pmu *cluster = data; in l2_cache_handle_irq() local
388 int num_counters = cluster->l2cache_pmu->num_counters; in l2_cache_handle_irq()
396 for_each_set_bit(idx, cluster->used_counters, num_counters) { in l2_cache_handle_irq()
397 struct perf_event *event = cluster->events[idx]; in l2_cache_handle_irq()
407 hwc = &event->hw; in l2_cache_handle_irq()
409 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_handle_irq()
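
The handler walks only the in-use counters; for each one whose overflow bit is set it folds the hardware value into the perf count and re-arms the counter, which is the update/set_period pair visible above. A self-contained skeleton of that loop (the overflow-status word and the two callbacks are stand-ins for the real register accesses):

#include <stdint.h>

struct cluster_state {
        uint32_t used_counters;                 /* busy-counter bitmap */
        uint64_t counts[32];                    /* accumulated per-counter totals */
};

/*
 * ovsr: overflow-status bits already read from (and cleared in) the
 * hardware; read_delta()/rearm() stand in for the driver's counter
 * accessors. Returns nonzero if the interrupt was handled.
 */
static int handle_cluster_irq(struct cluster_state *c, uint32_t ovsr,
                              uint64_t (*read_delta)(int idx),
                              void (*rearm)(int idx))
{
        int idx, handled = 0;

        if (!ovsr)
                return 0;                       /* not our interrupt */

        for (idx = 0; idx < 32; idx++) {
                if (!(c->used_counters & (1u << idx)))
                        continue;               /* counter not in use */
                if (!(ovsr & (1u << idx)))
                        continue;               /* no overflow here */

                c->counts[idx] += read_delta(idx);  /* event_update step */
                rearm(idx);                         /* set_period step */
                handled = 1;
        }

        return handled;
}
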
424 * physical PMUs (per cluster), because we do not support per-task mode in l2_cache_pmu_enable()
440 struct hw_perf_event *hwc = &event->hw; in l2_cache_event_init()
441 struct cluster_pmu *cluster; in l2_cache_event_init() local
445 if (event->attr.type != event->pmu->type) in l2_cache_event_init()
446 return -ENOENT; in l2_cache_event_init()
448 l2cache_pmu = to_l2cache_pmu(event->pmu); in l2_cache_event_init()
450 if (hwc->sample_period) { in l2_cache_event_init()
451 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
453 return -EOPNOTSUPP; in l2_cache_event_init()
456 if (event->cpu < 0) { in l2_cache_event_init()
457 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
458 "Per-task mode not supported\n"); in l2_cache_event_init()
459 return -EOPNOTSUPP; in l2_cache_event_init()
462 if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) || in l2_cache_event_init()
463 ((event->attr.config & ~L2_EVT_MASK) != 0)) && in l2_cache_event_init()
464 (event->attr.config != L2CYCLE_CTR_RAW_CODE)) { in l2_cache_event_init()
465 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
467 event->attr.config); in l2_cache_event_init()
468 return -EINVAL; in l2_cache_event_init()
472 if (event->group_leader->pmu != event->pmu && in l2_cache_event_init()
473 !is_software_event(event->group_leader)) { in l2_cache_event_init()
474 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
476 return -EINVAL; in l2_cache_event_init()
479 for_each_sibling_event(sibling, event->group_leader) { in l2_cache_event_init()
480 if (sibling->pmu != event->pmu && in l2_cache_event_init()
482 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
484 return -EINVAL; in l2_cache_event_init()
488 cluster = get_cluster_pmu(l2cache_pmu, event->cpu); in l2_cache_event_init()
489 if (!cluster) { in l2_cache_event_init()
491 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
492 "CPU%d not associated with L2 cluster\n", event->cpu); in l2_cache_event_init()
493 return -EINVAL; in l2_cache_event_init()
497 if ((event->group_leader != event) && in l2_cache_event_init()
498 (cluster->on_cpu != event->group_leader->cpu)) { in l2_cache_event_init()
499 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
501 event->cpu, event->group_leader->cpu); in l2_cache_event_init()
502 return -EINVAL; in l2_cache_event_init()
505 if ((event != event->group_leader) && in l2_cache_event_init()
506 !is_software_event(event->group_leader) && in l2_cache_event_init()
507 (L2_EVT_GROUP(event->group_leader->attr.config) == in l2_cache_event_init()
508 L2_EVT_GROUP(event->attr.config))) { in l2_cache_event_init()
509 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
511 event->group_leader->attr.config, in l2_cache_event_init()
512 event->attr.config); in l2_cache_event_init()
513 return -EINVAL; in l2_cache_event_init()
516 for_each_sibling_event(sibling, event->group_leader) { in l2_cache_event_init()
519 (L2_EVT_GROUP(sibling->attr.config) == in l2_cache_event_init()
520 L2_EVT_GROUP(event->attr.config))) { in l2_cache_event_init()
521 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
523 sibling->attr.config, in l2_cache_event_init()
524 event->attr.config); in l2_cache_event_init()
525 return -EINVAL; in l2_cache_event_init()
529 hwc->idx = -1; in l2_cache_event_init()
530 hwc->config_base = event->attr.config; in l2_cache_event_init()
536 event->cpu = cluster->on_cpu; in l2_cache_event_init()
543 struct cluster_pmu *cluster; in l2_cache_event_start() local
544 struct hw_perf_event *hwc = &event->hw; in l2_cache_event_start()
545 int idx = hwc->idx; in l2_cache_event_start()
549 hwc->state = 0; in l2_cache_event_start()
551 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_start()
553 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_event_start()
555 if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { in l2_cache_event_start()
558 config = hwc->config_base; in l2_cache_event_start()
564 cluster_pmu_set_resr(cluster, event_group, event_cc); in l2_cache_event_start()
574 struct hw_perf_event *hwc = &event->hw; in l2_cache_event_stop()
575 int idx = hwc->idx; in l2_cache_event_stop()
577 if (hwc->state & PERF_HES_STOPPED) in l2_cache_event_stop()
585 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in l2_cache_event_stop()
590 struct hw_perf_event *hwc = &event->hw; in l2_cache_event_add()
593 struct cluster_pmu *cluster; in l2_cache_event_add() local
595 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_add()
597 idx = l2_cache_get_event_idx(cluster, event); in l2_cache_event_add()
601 hwc->idx = idx; in l2_cache_event_add()
602 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in l2_cache_event_add()
603 cluster->events[idx] = event; in l2_cache_event_add()
604 local64_set(&hwc->prev_count, 0); in l2_cache_event_add()
617 struct hw_perf_event *hwc = &event->hw; in l2_cache_event_del()
618 struct cluster_pmu *cluster; in l2_cache_event_del() local
619 int idx = hwc->idx; in l2_cache_event_del()
621 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_del()
624 cluster->events[idx] = NULL; in l2_cache_event_del()
625 l2_cache_clear_event_idx(cluster, event); in l2_cache_event_del()
641 return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask); in l2_cache_pmu_cpumask_show()
657 PMU_FORMAT_ATTR(l2_code, "config:4-11");
658 PMU_FORMAT_ATTR(l2_group, "config:0-3");
659 PMU_FORMAT_ATTR(event, "config:0-11");
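
The three format attributes above define the raw config layout: bits 0-3 select the event group, bits 4-11 the event code within that group, and "event" covers both fields. A small decoder matching those bit ranges (the field helpers are mine, not the driver's macros):

#include <stdint.h>
#include <stdio.h>

/* Field layout from the format strings: l2_group = config:0-3,
 * l2_code = config:4-11, event = config:0-11. */
static unsigned int cfg_group(uint64_t config) { return config & 0xf; }
static unsigned int cfg_code(uint64_t config)  { return (config >> 4) & 0xff; }

int main(void)
{
        uint64_t config = 0x4a3;        /* arbitrary example value */

        /* prints "group=3 code=0x4a" */
        printf("group=%u code=0x%02x\n", cfg_group(config), cfg_code(config));
        return 0;
}
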
679 return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); in l2cache_pmu_event_show()
687 L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
688 L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
691 L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
692 L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
693 L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
739 struct cluster_pmu *cluster; in l2_cache_associate_cpu_with_cluster() local
743 * single-threaded cores, and MPIDR[aff2] for multi-threaded in l2_cache_associate_cpu_with_cluster()
752 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
753 if (cluster->cluster_id != cpu_cluster_id) in l2_cache_associate_cpu_with_cluster()
756 dev_info(&l2cache_pmu->pdev->dev, in l2_cache_associate_cpu_with_cluster()
757 "CPU%d associated with cluster %d\n", cpu, in l2_cache_associate_cpu_with_cluster()
758 cluster->cluster_id); in l2_cache_associate_cpu_with_cluster()
759 cpumask_set_cpu(cpu, &cluster->cluster_cpus); in l2_cache_associate_cpu_with_cluster()
760 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; in l2_cache_associate_cpu_with_cluster()
761 return cluster; in l2_cache_associate_cpu_with_cluster()
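
The association loop above matches the firmware-provided cluster id against a field of MPIDR_EL1; per the comment at line 743, that field is affinity level 1 on single-threaded cores and level 2 on multi-threaded ones. A sketch of that lookup using the arm64 cputype helpers (a paraphrase, not the exact driver code):

#include <linux/types.h>
#include <asm/cputype.h>

/*
 * Sketch: physical cluster id of the current CPU from MPIDR_EL1. With
 * the MT bit set the cluster lives in affinity level 2, otherwise in
 * affinity level 1.
 */
static u64 current_cpu_cluster_id(void)
{
        u64 mpidr = read_cpuid_mpidr();
        int level = (mpidr & MPIDR_MT_BITMASK) ? 2 : 1;

        return MPIDR_AFFINITY_LEVEL(mpidr, level);
}
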
769 struct cluster_pmu *cluster; in l2cache_pmu_online_cpu() local
773 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
774 if (!cluster) { in l2cache_pmu_online_cpu()
776 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
777 if (!cluster) { in l2cache_pmu_online_cpu()
778 /* Only if broken firmware doesn't list every cluster */ in l2cache_pmu_online_cpu()
779 WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu); in l2cache_pmu_online_cpu()
784 /* If another CPU is managing this cluster, we're done */ in l2cache_pmu_online_cpu()
785 if (cluster->on_cpu != -1) in l2cache_pmu_online_cpu()
789 * All CPUs on this cluster were down, use this one. in l2cache_pmu_online_cpu()
792 cluster->on_cpu = cpu; in l2cache_pmu_online_cpu()
793 cpumask_set_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_online_cpu()
796 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu))); in l2cache_pmu_online_cpu()
797 enable_irq(cluster->irq); in l2cache_pmu_online_cpu()
804 struct cluster_pmu *cluster; in l2cache_pmu_offline_cpu() local
810 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_offline_cpu()
811 if (!cluster) in l2cache_pmu_offline_cpu()
814 /* If this CPU is not managing the cluster, we're done */ in l2cache_pmu_offline_cpu()
815 if (cluster->on_cpu != cpu) in l2cache_pmu_offline_cpu()
818 /* Give up ownership of cluster */ in l2cache_pmu_offline_cpu()
819 cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
820 cluster->on_cpu = -1; in l2cache_pmu_offline_cpu()
822 /* Any other CPU for this cluster which is still online */ in l2cache_pmu_offline_cpu()
823 cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus, in l2cache_pmu_offline_cpu()
827 disable_irq(cluster->irq); in l2cache_pmu_offline_cpu()
831 perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target); in l2cache_pmu_offline_cpu()
832 cluster->on_cpu = target; in l2cache_pmu_offline_cpu()
833 cpumask_set_cpu(target, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
834 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target))); in l2cache_pmu_offline_cpu()
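
The offline path above hands the cluster to another CPU when its current owner goes down: clear ownership, find any still-online CPU in the same cluster, then migrate the perf context and IRQ affinity to it. The owner-selection step, reduced to a self-contained sketch with plain bitmasks standing in for the kernel cpumasks:

#include <stdint.h>

/*
 * Pick a new owning CPU for a cluster when 'dying_cpu' goes offline:
 * any other online CPU of the same cluster, or -1 if none is left.
 */
static int pick_new_owner(uint64_t cluster_cpus, uint64_t online_cpus,
                          int dying_cpu)
{
        uint64_t candidates = cluster_cpus & online_cpus & ~(1ULL << dying_cpu);
        int cpu;

        for (cpu = 0; cpu < 64; cpu++)
                if (candidates & (1ULL << cpu))
                        return cpu;     /* first available CPU takes over */

        return -1;                      /* cluster has no online CPU left */
}
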
841 struct platform_device *pdev = to_platform_device(dev->parent); in l2_cache_pmu_probe_cluster()
844 struct cluster_pmu *cluster; in l2_cache_pmu_probe_cluster() local
851 dev_err(&pdev->dev, "unable to read ACPI uid\n"); in l2_cache_pmu_probe_cluster()
855 cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL); in l2_cache_pmu_probe_cluster()
856 if (!cluster) in l2_cache_pmu_probe_cluster()
857 return -ENOMEM; in l2_cache_pmu_probe_cluster()
859 INIT_LIST_HEAD(&cluster->next); in l2_cache_pmu_probe_cluster()
860 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
861 cluster->cluster_id = fw_cluster_id; in l2_cache_pmu_probe_cluster()
866 cluster->irq = irq; in l2_cache_pmu_probe_cluster()
868 cluster->l2cache_pmu = l2cache_pmu; in l2_cache_pmu_probe_cluster()
869 cluster->on_cpu = -1; in l2_cache_pmu_probe_cluster()
871 err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq, in l2_cache_pmu_probe_cluster()
874 "l2-cache-pmu", cluster); in l2_cache_pmu_probe_cluster()
876 dev_err(&pdev->dev, in l2_cache_pmu_probe_cluster()
881 dev_info(&pdev->dev, in l2_cache_pmu_probe_cluster()
882 "Registered L2 cache PMU cluster %lld\n", fw_cluster_id); in l2_cache_pmu_probe_cluster()
884 spin_lock_init(&cluster->pmu_lock); in l2_cache_pmu_probe_cluster()
886 l2cache_pmu->num_pmus++; in l2_cache_pmu_probe_cluster()
897 devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL); in l2_cache_pmu_probe()
899 return -ENOMEM; in l2_cache_pmu_probe()
901 INIT_LIST_HEAD(&l2cache_pmu->clusters); in l2_cache_pmu_probe()
904 l2cache_pmu->pmu = (struct pmu) { in l2_cache_pmu_probe()
920 l2cache_pmu->num_counters = get_num_counters(); in l2_cache_pmu_probe()
921 l2cache_pmu->pdev = pdev; in l2_cache_pmu_probe()
922 l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev, in l2_cache_pmu_probe()
924 if (!l2cache_pmu->pmu_cluster) in l2_cache_pmu_probe()
925 return -ENOMEM; in l2_cache_pmu_probe()
927 l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1; in l2_cache_pmu_probe()
928 l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) | in l2_cache_pmu_probe()
931 cpumask_clear(&l2cache_pmu->cpumask); in l2_cache_pmu_probe()
933 /* Read cluster info and initialize each cluster */ in l2_cache_pmu_probe()
934 err = device_for_each_child(&pdev->dev, l2cache_pmu, in l2_cache_pmu_probe()
939 if (l2cache_pmu->num_pmus == 0) { in l2_cache_pmu_probe()
940 dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n"); in l2_cache_pmu_probe()
941 return -ENODEV; in l2_cache_pmu_probe()
945 &l2cache_pmu->node); in l2_cache_pmu_probe()
947 dev_err(&pdev->dev, "Error %d registering hotplug", err); in l2_cache_pmu_probe()
951 err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1); in l2_cache_pmu_probe()
953 dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err); in l2_cache_pmu_probe()
957 dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n", in l2_cache_pmu_probe()
958 l2cache_pmu->num_pmus); in l2_cache_pmu_probe()
964 &l2cache_pmu->node); in l2_cache_pmu_probe()
973 perf_pmu_unregister(&l2cache_pmu->pmu); in l2_cache_pmu_remove()
975 &l2cache_pmu->node); in l2_cache_pmu_remove()
981 .name = "qcom-l2cache-pmu",