Lines matching refs: cluster
Identifier cross-reference over the Qualcomm L2 cache PMU driver (drivers/perf/qcom_l2_pmu.c). Each entry is the file line number, the source line, and the enclosing function; hits that are declarations are tagged "argument" or "local".
293 static void cluster_pmu_set_resr(struct cluster_pmu *cluster, in cluster_pmu_set_resr() argument
304 spin_lock_irqsave(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
312 spin_unlock_irqrestore(&cluster->pmu_lock, flags); in cluster_pmu_set_resr()
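The refs at 293-312 show `cluster` entering cluster_pmu_set_resr() as an argument and its pmu_lock bracketing a register update. A minimal kernel-context sketch of the read-modify-write pattern this implies, assuming hypothetical read_resr()/write_resr() accessors and RESR_GROUP_* field constants; only the argument and the spin_lock_irqsave()/spin_unlock_irqrestore() pair are confirmed by the listing:

static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
                                 u32 event_group, u32 event_cc)
{
        unsigned long flags;
        u32 shift = RESR_GROUP_BITS * event_group;      /* assumed field layout */
        u64 resr;

        /* pmu_lock serializes the shared event-selection register (line 304). */
        spin_lock_irqsave(&cluster->pmu_lock, flags);
        resr = read_resr();                             /* hypothetical accessor */
        resr &= ~((u64)RESR_GROUP_MASK << shift);
        resr |= (u64)event_cc << shift;
        write_resr(resr);                               /* hypothetical accessor */
        spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}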
369 static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, in l2_cache_cluster_set_period() argument
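l2_cache_cluster_set_period() takes the cluster and the event's hw_perf_event. The usual PMU idiom here is to start a sampling counter at half its range so the overflow interrupt fires long before wraparound; a sketch under that assumption (MAX_PERIOD and the counter-write helper are hypothetical, while local64_set() on hwc->prev_count is the standard perf bookkeeping):

static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
                                        struct hw_perf_event *hwc)
{
        u64 new = MAX_PERIOD >> 1;      /* assumed half-range start value */

        /* Remember the start value so the next update can compute the delta. */
        local64_set(&hwc->prev_count, new);
        cluster_pmu_counter_set_value(hwc->idx, new);   /* hypothetical helper */
}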
389 static int l2_cache_get_event_idx(struct cluster_pmu *cluster, in l2_cache_get_event_idx() argument
394 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
398 if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) in l2_cache_get_event_idx()
404 idx = find_first_zero_bit(cluster->used_counters, num_ctrs); in l2_cache_get_event_idx()
415 if (test_bit(group, cluster->used_groups)) in l2_cache_get_event_idx()
418 set_bit(idx, cluster->used_counters); in l2_cache_get_event_idx()
419 set_bit(group, cluster->used_groups); in l2_cache_get_event_idx()
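The refs at 394-419 outline a bitmap allocator: a dedicated cycle-counter slot claimed atomically with test_and_set_bit(), a find_first_zero_bit() scan over the general-purpose counters, and a used_groups bitmap enforcing column exclusion (only one live event per hardware event group). A hedged reconstruction; L2CYCLE_CTR_RAW_CODE and the exact error codes are assumptions:

static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
                                  struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        /* Last slot is reserved for the cycle counter (line 394). */
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
        unsigned int group;
        int idx;

        /* The cycle counter lives in a fixed slot of its own. */
        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { /* assumed raw code */
                if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
                        return -EAGAIN;
                return l2_cycle_ctr_idx;
        }

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                return -EAGAIN;         /* all general-purpose counters busy */

        /* Column exclusion: the event's group must not already be in use. */
        group = L2_EVT_GROUP(hwc->config_base);
        if (test_bit(group, cluster->used_groups))
                return -EAGAIN;

        set_bit(idx, cluster->used_counters);
        set_bit(group, cluster->used_groups);
        return idx;
}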
424 static void l2_cache_clear_event_idx(struct cluster_pmu *cluster, in l2_cache_clear_event_idx() argument
430 clear_bit(idx, cluster->used_counters); in l2_cache_clear_event_idx()
432 clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups); in l2_cache_clear_event_idx()
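l2_cache_clear_event_idx() is the inverse: release the counter bit and, for non-cycle-counter events, the group bit keyed by L2_EVT_GROUP(hwc->config_base). A short sketch (the cycle-counter guard is an assumption):

static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
                                     struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        clear_bit(hwc->idx, cluster->used_counters);
        if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)   /* assumed guard */
                clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}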
437 struct cluster_pmu *cluster = data; in l2_cache_handle_irq() local
438 int num_counters = cluster->l2cache_pmu->num_counters; in l2_cache_handle_irq()
446 for_each_set_bit(idx, cluster->used_counters, num_counters) { in l2_cache_handle_irq()
447 struct perf_event *event = cluster->events[idx]; in l2_cache_handle_irq()
459 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_handle_irq()
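The IRQ handler receives the cluster through the dev_id cookie registered at line 942 and walks only the counters marked in used_counters, re-arming each overflowed event via l2_cache_cluster_set_period(). A sketch; the overflow-status check and l2_cache_event_update() are assumptions about code not shown in the listing:

static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
        struct cluster_pmu *cluster = data;     /* dev_id from devm_request_irq() */
        int num_counters = cluster->l2cache_pmu->num_counters;
        int idx;

        if (!cluster_pmu_has_overflowed())      /* hypothetical status check */
                return IRQ_NONE;

        for_each_set_bit(idx, cluster->used_counters, num_counters) {
                struct perf_event *event = cluster->events[idx];

                if (WARN_ON_ONCE(!event))
                        continue;

                l2_cache_event_update(event);   /* hypothetical: fold in the delta */
                l2_cache_cluster_set_period(cluster, &event->hw);
        }

        return IRQ_HANDLED;
}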
491 struct cluster_pmu *cluster; in l2_cache_event_init() local
546 cluster = get_cluster_pmu(l2cache_pmu, event->cpu); in l2_cache_event_init()
547 if (!cluster) { in l2_cache_event_init()
556 (cluster->on_cpu != event->group_leader->cpu)) { in l2_cache_event_init()
594 event->cpu = cluster->on_cpu; in l2_cache_event_init()
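event_init resolves the per-CPU cluster mapping and rejects events it cannot serve. Line 556 shows grouped events being required to sit on the cluster's nominated reader CPU, and line 594 shows the uncore idiom of overriding event->cpu. A partial sketch of that flow (the validation at lines 491-545 is not in the listing, and the first conjunct of the group check is an assumption):

static int l2_cache_event_init(struct perf_event *event)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(event->pmu);
        struct cluster_pmu *cluster;

        /* ... sampling and attribute validation elided (lines 491-545) ... */

        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster)
                return -EINVAL; /* CPU never came online under a cluster */

        /* Grouped events must all be read on the cluster's nominated CPU. */
        if (event->group_leader->pmu == event->pmu &&   /* assumed conjunct */
            cluster->on_cpu != event->group_leader->cpu)
                return -EINVAL;

        /* Uncore PMU: steer perf to the one CPU that manages this cluster. */
        event->cpu = cluster->on_cpu;

        return 0;
}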
601 struct cluster_pmu *cluster; in l2_cache_event_start() local
609 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_start()
611 l2_cache_cluster_set_period(cluster, hwc); in l2_cache_event_start()
622 cluster_pmu_set_resr(cluster, event_group, event_cc); in l2_cache_event_start()
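event_start re-arms the counter, decodes the event code and group out of hwc->config_base, and programs the shared selection register via cluster_pmu_set_resr() before enabling the counter. A sketch; L2_EVT_CODE() and the enable helpers are assumed names:

static void l2_cache_event_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        u32 event_cc, event_group;

        hwc->state = 0;
        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_cluster_set_period(cluster, hwc);

        event_cc = L2_EVT_CODE(hwc->config_base);       /* assumed decoder */
        event_group = L2_EVT_GROUP(hwc->config_base);
        cluster_pmu_set_resr(cluster, event_group, event_cc);

        cluster_pmu_counter_enable_interrupt(hwc->idx); /* hypothetical */
        cluster_pmu_counter_enable(hwc->idx);           /* hypothetical */
}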
651 struct cluster_pmu *cluster; in l2_cache_event_add() local
653 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_add()
655 idx = l2_cache_get_event_idx(cluster, event); in l2_cache_event_add()
661 cluster->events[idx] = event; in l2_cache_event_add()
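event_add is where the allocator above is consumed: claim an index, park the event in cluster->events[] so the IRQ handler can find it, and optionally start it. A sketch following the standard perf pmu::add contract:

static int l2_cache_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        int idx;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        idx = l2_cache_get_event_idx(cluster, event);
        if (idx < 0)
                return idx;

        hwc->idx = idx;
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        cluster->events[idx] = event;
        local64_set(&hwc->prev_count, 0);

        if (flags & PERF_EF_START)
                l2_cache_event_start(event, flags);

        perf_event_update_userpage(event);
        return 0;
}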
676 struct cluster_pmu *cluster; in l2_cache_event_del() local
679 cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); in l2_cache_event_del()
682 cluster->events[idx] = NULL; in l2_cache_event_del()
683 l2_cache_clear_event_idx(cluster, event); in l2_cache_event_del()
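event_del mirrors event_add: stop the event, clear the cluster->events[] slot so the IRQ handler never sees a stale pointer, then release the bitmap bits. Sketch (l2_cache_event_stop() is assumed from the usual pmu::del pattern):

static void l2_cache_event_del(struct perf_event *event, int flags)
{
        struct cluster_pmu *cluster;
        int idx = event->hw.idx;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_event_stop(event, flags | PERF_EF_UPDATE);     /* assumed */
        cluster->events[idx] = NULL;
        l2_cache_clear_event_idx(cluster, event);

        perf_event_update_userpage(event);
}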
800 struct cluster_pmu *cluster = NULL; in l2_cache_associate_cpu_with_cluster() local
813 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
814 if (cluster->cluster_id != cpu_cluster_id) in l2_cache_associate_cpu_with_cluster()
819 cluster->cluster_id); in l2_cache_associate_cpu_with_cluster()
820 cpumask_set_cpu(cpu, &cluster->cluster_cpus); in l2_cache_associate_cpu_with_cluster()
821 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; in l2_cache_associate_cpu_with_cluster()
825 return cluster; in l2_cache_associate_cpu_with_cluster()
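The association walk at lines 813-825 matches a CPU's firmware cluster id against the clusters list, records the CPU in cluster_cpus, and caches the mapping in the pmu_cluster per-CPU pointer. A sketch; the cluster-id derivation (typically from MPIDR on arm64), the dev_info() message, and the pdev member are assumptions. The sketch returns through a separate found pointer, since list_for_each_entry() leaves its cursor at an invalid head-embedded entry when the walk completes without a match:

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        struct cluster_pmu *cluster, *found = NULL;
        int cpu_cluster_id = read_cpu_cluster_id(cpu);  /* hypothetical */

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n",
                         cpu, cluster->cluster_id);
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                found = cluster;
                break;
        }

        return found;
}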
830 struct cluster_pmu *cluster; in l2cache_pmu_online_cpu() local
834 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
835 if (!cluster) { in l2cache_pmu_online_cpu()
837 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
838 if (!cluster) { in l2cache_pmu_online_cpu()
846 if (cluster->on_cpu != -1) in l2cache_pmu_online_cpu()
853 cluster->on_cpu = cpu; in l2cache_pmu_online_cpu()
857 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu))); in l2cache_pmu_online_cpu()
858 enable_irq(cluster->irq); in l2cache_pmu_online_cpu()
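The hotplug online callback lazily builds the CPU-to-cluster map, then elects the first online CPU of a cluster as its event-reading owner and points the overflow IRQ at it. Sketch of the flow the refs imply (the hlist_entry_safe() retrieval follows the standard cpuhp multi-instance pattern and is an assumption):

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct l2cache_pmu *l2cache_pmu =
                hlist_entry_safe(node, struct l2cache_pmu, node);
        struct cluster_pmu *cluster;

        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster) {
                /* First time this CPU has come online. */
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                if (!cluster)
                        return 0;       /* firmware did not describe this cluster */
        }

        /* Another CPU already owns this cluster's counters. */
        if (cluster->on_cpu != -1)
                return 0;

        /* Adopt the cluster and steer its overflow IRQ to this CPU. */
        cluster->on_cpu = cpu;
        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
        enable_irq(cluster->irq);

        return 0;
}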
865 struct cluster_pmu *cluster; in l2cache_pmu_offline_cpu() local
871 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_offline_cpu()
872 if (!cluster) in l2cache_pmu_offline_cpu()
876 if (cluster->on_cpu != cpu) in l2cache_pmu_offline_cpu()
881 cluster->on_cpu = -1; in l2cache_pmu_offline_cpu()
884 cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus, in l2cache_pmu_offline_cpu()
888 disable_irq(cluster->irq); in l2cache_pmu_offline_cpu()
893 cluster->on_cpu = target; in l2cache_pmu_offline_cpu()
895 WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target))); in l2cache_pmu_offline_cpu()
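The offline callback hands ownership to any other online CPU of the same cluster; if none remains, it parks on_cpu at -1 and disables the IRQ rather than leaving it pointed at a dead CPU. Sketch (perf_pmu_migrate_context(), cpumask_any_but(), and the pmu member name are the usual companions of this pattern, assumed here):

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct l2cache_pmu *l2cache_pmu =
                hlist_entry_safe(node, struct l2cache_pmu, node);
        struct cluster_pmu *cluster;
        cpumask_t cluster_online_cpus;
        unsigned int target;

        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster)
                return 0;

        /* Only the owning CPU has anything to hand over. */
        if (cluster->on_cpu != cpu)
                return 0;

        cluster->on_cpu = -1;

        /* Pick any other online CPU in this cluster. */
        cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                    cpu_online_mask);
        target = cpumask_any_but(&cluster_online_cpus, cpu);
        if (target >= nr_cpu_ids) {
                disable_irq(cluster->irq);      /* whole cluster is down */
                return 0;
        }

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);      /* assumed */
        cluster->on_cpu = target;
        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));

        return 0;
}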
905 struct cluster_pmu *cluster; in l2_cache_pmu_probe_cluster() local
919 cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL); in l2_cache_pmu_probe_cluster()
920 if (!cluster) in l2_cache_pmu_probe_cluster()
923 INIT_LIST_HEAD(&cluster->next); in l2_cache_pmu_probe_cluster()
924 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
925 cluster->cluster_id = fw_cluster_id; in l2_cache_pmu_probe_cluster()
935 cluster->irq = irq; in l2_cache_pmu_probe_cluster()
937 cluster->l2cache_pmu = l2cache_pmu; in l2_cache_pmu_probe_cluster()
938 cluster->on_cpu = -1; in l2_cache_pmu_probe_cluster()
942 "l2-cache-pmu", cluster); in l2_cache_pmu_probe_cluster()
952 spin_lock_init(&cluster->pmu_lock); in l2_cache_pmu_probe_cluster()
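Probe ties the lifecycle together: one devm-managed cluster_pmu per firmware cluster, linked into l2cache_pmu->clusters, with on_cpu parked at -1 until the hotplug callback elects an owner, and the overflow IRQ requested with the cluster itself as the dev_id cookie (matching the handler at line 437). A sketch; fw_cluster_id discovery, the IRQ flags, and the NOAUTOEN step (implied by the enable_irq() at line 858) are assumptions:

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct l2cache_pmu *l2cache_pmu = data;
        struct cluster_pmu *cluster;
        int fw_cluster_id = get_fw_cluster_id(dev);     /* hypothetical */
        int irq, err;

        cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        INIT_LIST_HEAD(&cluster->next);
        list_add(&cluster->next, &l2cache_pmu->clusters);
        cluster->cluster_id = fw_cluster_id;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
        cluster->irq = irq;

        cluster->l2cache_pmu = l2cache_pmu;
        cluster->on_cpu = -1;   /* no owner until a CPU comes online */

        /* Keep the IRQ off until a CPU adopts the cluster (assumed). */
        irq_set_status_flags(irq, IRQ_NOAUTOEN);

        /* Deliver overflow interrupts with this cluster as the cookie. */
        err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
                               "l2-cache-pmu", cluster);
        if (err)
                return err;

        spin_lock_init(&cluster->pmu_lock);
        return 0;
}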