Lines matching +full:interrupt +full:-affinity (ARM PMU platform probing, drivers/perf/arm_pmu_platform.c)

// SPDX-License-Identifier: GPL-2.0
In probe_current_pmu():

	int ret = -ENODEV;

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}
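For context, the mask/cpuid match above walks a sentinel-terminated probe table. A minimal sketch, assuming the three-field struct pmu_probe_info layout implied by the loop; the ID value and init function below are illustrative, not real entries:

	struct pmu_probe_info {
		unsigned int cpuid;	/* expected ID register value */
		unsigned int mask;	/* bits of the ID register to compare */
		int (*init)(struct arm_pmu *pmu);
	};

	static const struct pmu_probe_info example_probe_table[] = {
		/* hypothetical entry matching an implementer/part encoding */
		{ .cpuid = 0x4100c070, .mask = 0xff00fff0,
		  .init = example_a7_pmu_init },
		{ /* sentinel: .init == NULL terminates the loop */ },
	};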
In pmu_parse_percpu_irq():

	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
	if (ret)
		return ret;

	for_each_cpu(cpu, &pmu->supported_cpus)
		per_cpu(hw_events->irq, cpu) = irq;
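irq_get_percpu_devid_partition() fills the supplied cpumask with the CPUs a per-CPU (PPI) interrupt can fire on, returning 0 on success. A hedged usage sketch; example_ppi_covers_cpu() is a hypothetical helper, not from the driver:

	static bool example_ppi_covers_cpu(int irq, int cpu)
	{
		cpumask_var_t mask;
		bool covered = false;

		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
			return false;

		/* returns 0 on success; mask then holds the PPI's partition */
		if (!irq_get_percpu_devid_partition(irq, mask))
			covered = cpumask_test_cpu(cpu, mask);

		free_cpumask_var(mask);
		return covered;
	}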
In pmu_has_irq_affinity():

	return !!of_find_property(node, "interrupt-affinity", NULL);
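The double negation just coerces the property pointer to bool. An equivalent spelling, assuming of_property_read_bool() semantics (true iff the property exists); the _alt name is hypothetical:

	static bool pmu_has_irq_affinity_alt(struct device_node *node)
	{
		return of_property_read_bool(node, "interrupt-affinity");
	}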
In pmu_parse_irq_affinity():

	/*
	 * If we don't have an interrupt-affinity property, we guess irq
	 * affinity matches our logical CPU order, as we used to assume.
	 */
	if (!pmu_has_irq_affinity(dev->of_node))
		return i;

	dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i);
	if (!dn) {
		dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i);
		return -EINVAL;
	}
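The devicetree binding pairs the i-th entry of "interrupt-affinity" (a phandle to a CPU node) with the i-th PMU interrupt. A condensed sketch of the full index-to-logical-CPU mapping, assuming the upstream of_cpu_node_to_id() helper and simplified error handling; example_parse_affinity() is hypothetical:

	static int example_parse_affinity(struct device *dev, int i)
	{
		struct device_node *dn;
		int cpu;

		dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i);
		if (!dn)
			return -EINVAL;

		cpu = of_cpu_node_to_id(dn);	/* logical CPU for this node */
		of_node_put(dn);		/* balance of_parse_phandle() */

		return cpu;
	}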
In pmu_parse_irqs():

	struct platform_device *pdev = pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	struct device *dev = &pdev->dev;
	...
	/* No IRQs at all: the PMU can still count, but cannot sample. */
	pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
	cpumask_setall(&pmu->supported_cpus);
	...
	if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
		dev_warn(dev, "no interrupt-affinity property, guessing.\n");
	...
	/* A per-CPU IRQ mixed in with per-CPU-indexed SPIs is rejected. */
	if (irq_is_percpu_devid(irq))
		return -EINVAL;
	...
	/* As is a second IRQ resolving to a CPU that already has one. */
	if (per_cpu(hw_events->irq, cpu)) {
		dev_warn(dev, "multiple PMU IRQs for the same CPU detected\n");
		return -EINVAL;
	}

	per_cpu(hw_events->irq, cpu) = irq;
	cpumask_set_cpu(cpu, &pmu->supported_cpus);
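supported_cpus, populated above, is what later gates event creation: the core arm_pmu code refuses events for CPUs whose IRQ was never wired up. A hedged sketch of that consumer-side check, condensed from memory rather than quoted:

	/* inside the PMU's event_init callback */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;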
In armpmu_request_irqs():

	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		int irq = per_cpu(hw_events->irq, cpu);
		...
	}
In armpmu_free_irqs():

	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		int irq = per_cpu(hw_events->irq, cpu);
		...
	}
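Requesting the IRQs recorded above takes two shapes: one request_percpu_irq() call for a PPI, or one request_irq() per CPU for per-CPU-indexed SPIs. A minimal sketch with hypothetical handler/cookie parameters; the flags mirror what PMU drivers commonly pass, not necessarily this driver's exact set:

	static int example_request_pmu_irq(int irq, int cpu,
					   irq_handler_t handler,
					   void __percpu *percpu_dev)
	{
		if (irq_is_percpu_devid(irq))
			return request_percpu_irq(irq, handler, "arm-pmu",
						  percpu_dev);

		return request_irq(irq, handler,
				   IRQF_NOBALANCING | IRQF_NO_THREAD,
				   "arm-pmu", per_cpu_ptr(percpu_dev, cpu));
	}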
In arm_pmu_device_probe():

	struct device *dev = &pdev->dev;
	int ret = -ENODEV;

	pmu = armpmu_alloc();
	if (!pmu)
		return -ENOMEM;

	pmu->plat_device = pdev;
	...
	pmu->secure_access = of_property_read_bool(dev->of_node,
						   "secure-reg-access");

	/* arm64 systems boot only as non-secure */
	if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
		dev_warn(dev, "ignoring \"secure-reg-access\" property for arm64\n");
		pmu->secure_access = false;
	}
	...
	cpumask_setall(&pmu->supported_cpus);
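The cpumask_setall() at the end belongs to the legacy fallback path: with no devicetree match data, the driver probes the running CPU's ID registers and assumes the PMU spans every CPU. A condensed sketch of that probe tail, assuming the init_fn/probe_table names used upstream:

	if (init_fn) {
		/* DT-matched init; secure-reg-access handled just above */
		ret = init_fn(pmu);
	} else if (probe_table) {
		cpumask_setall(&pmu->supported_cpus);
		ret = probe_current_pmu(pmu, probe_table);
	}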