/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

/*
 * ARM PMU hw_event flags
 */
#define ARMPMU_EVT_64BIT		0x00001 /* Event uses a 64bit counter */
#define ARMPMU_EVT_47BIT		0x00002 /* Event uses a 47bit counter */

static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
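
/*
 * Example (illustrative sketch only, not part of this interface): these
 * flags live in the architecture-specific bits of event->hw.flags; a driver
 * that wants a 64-bit counter for an event sets the flag while mapping the
 * event. The myimpl_event_wants_64bit() helper below is hypothetical.
 *
 *	// in a driver's map_event callback, after resolving the HW event:
 *	if (myimpl_event_wants_64bit(event))
 *		event->hw.flags |= ARMPMU_EVT_64BIT;
 */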

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {						\
	[0 ... C(OP_MAX) - 1] = {				\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},							\
}
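
/*
 * Example (illustrative sketch only, not part of this interface): a
 * driver's cache map usually starts from PERF_CACHE_MAP_ALL_UNSUPPORTED
 * and then overrides just the entries the hardware implements. The table
 * name and the event encodings below are hypothetical.
 *
 *	static const unsigned myimpl_perf_cache_map
 *				[PERF_COUNT_HW_CACHE_MAX]
 *				[PERF_COUNT_HW_CACHE_OP_MAX]
 *				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 *		PERF_CACHE_MAP_ALL_UNSUPPORTED,
 *		[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= 0x04,
 *		[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= 0x03,
 *	};
 */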

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;

	int irq;
};
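
/*
 * Example (illustrative sketch only): a minimal get_event_idx callback
 * claims a free counter by atomically setting its bit in used_mask; the
 * matching clear_event_idx clears the bit again. The function name is
 * hypothetical, and real drivers also enforce counter-specific constraints
 * (e.g. a dedicated cycle counter).
 *
 *	static int myimpl_get_event_idx(struct pmu_hw_events *hw_events,
 *					struct perf_event *event)
 *	{
 *		int idx;
 *
 *		for (idx = 0; idx < ARMPMU_MAX_HWEVENTS; idx++) {
 *			if (!test_and_set_bit(idx, hw_events->used_mask))
 *				return idx;
 *		}
 *
 *		return -EAGAIN;
 *	}
 */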

enum armpmu_attr_groups {
	ARMPMU_ATTR_GROUP_COMMON,
	ARMPMU_ATTR_GROUP_EVENTS,
	ARMPMU_ATTR_GROUP_FORMATS,
	ARMPMU_ATTR_GROUP_CAPS,
	ARMPMU_NR_ATTR_GROUPS
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	supported_cpus;
	char		*name;
	int		pmuver;
	irqreturn_t	(*handle_irq)(struct arm_pmu *pmu);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					   struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u64		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u64 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*map_event)(struct perf_event *event);
	int		(*filter_match)(struct perf_event *event);
	int		num_events;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS		0x40
	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE	0x4000
	DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct hlist_node	node;
	struct notifier_block	cpu_pm_nb;
	/* the attr_groups array must be NULL-terminated */
	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
	/* store the PMMIR_EL1 to expose slots */
	u64		reg_pmmir;

	/* Only to be used by ACPI probing code */
	unsigned long acpi_cpuid;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
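
/*
 * Example (illustrative sketch only): callbacks that only receive a
 * struct perf_event can recover the owning arm_pmu, and from that the
 * per-cpu pmu_hw_events, via to_arm_pmu(). The function name below is
 * hypothetical.
 *
 *	static void myimpl_enable_event(struct perf_event *event)
 *	{
 *		struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 *		struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 *		int idx = event->hw.idx;
 *
 *		// program and enable the counter selected by idx,
 *		// typically under events->pmu_lock
 *	}
 */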

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);
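
/*
 * Example (illustrative sketch only): the usual overflow-handling pattern
 * in a handle_irq callback folds the hardware counter into the perf count
 * with armpmu_event_update() and re-arms it with armpmu_event_set_period()
 * before reporting the overflow. Only the two helpers above are part of
 * this header; the surrounding variables come from the (hypothetical)
 * driver.
 *
 *	// per overflowed event, inside a driver's handle_irq():
 *	struct perf_sample_data data;
 *	struct hw_perf_event *hwc = &event->hw;
 *
 *	armpmu_event_update(event);
 *	perf_sample_data_init(&data, 0, hwc->last_period);
 *	if (!armpmu_event_set_period(event))
 *		continue;
 *	if (perf_event_overflow(event, &data, regs))
 *		cpu_pmu->disable(event);
 */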

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
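
/*
 * Example (illustrative sketch only): a driver's map_event callback is
 * typically a thin wrapper around armpmu_map_event(), passing its own
 * hardware map, cache map and raw-event mask. The table names and the
 * mask value below are hypothetical.
 *
 *	static int myimpl_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &myimpl_perf_map,
 *					&myimpl_perf_cache_map, 0xFF);
 *	}
 */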

typedef int (*armpmu_init_fn)(struct arm_pmu *);

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	armpmu_init_fn init;
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
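
/*
 * Example (illustrative sketch only): a CPUID-keyed probe table built with
 * the macros above, terminated by an empty entry. The init functions are
 * hypothetical; the CPU part/arch constants come from <asm/cputype.h>.
 *
 *	static const struct pmu_probe_info myimpl_pmu_probe_table[] = {
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, myimpl_a9_pmu_init),
 *		XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, myimpl_xscale2_pmu_init),
 *		{ 0 },	// sentinel
 *	};
 */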

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);
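
/*
 * Example (illustrative sketch only): a platform driver's probe routine
 * usually just forwards to arm_pmu_device_probe() with its OF match table
 * and/or CPUID probe table. All names below are hypothetical.
 *
 *	static int myimpl_pmu_device_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, myimpl_pmu_of_device_ids,
 *					    myimpl_pmu_probe_table);
 *	}
 */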

#ifdef CONFIG_ACPI
int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
#else
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
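
/*
 * Example (illustrative sketch only): on ACPI systems the per-CPU init
 * function is handed to arm_pmu_acpi_probe() from the driver's init path
 * rather than being matched through OF. All names below are hypothetical.
 *
 *	static int __init myimpl_pmu_driver_init(void)
 *	{
 *		if (acpi_disabled)
 *			return platform_driver_register(&myimpl_pmu_driver);
 *
 *		return arm_pmu_acpi_probe(myimpl_pmuv3_pmu_init);
 *	}
 *	device_initcall(myimpl_pmu_driver_init)
 */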

#ifdef CONFIG_KVM
void kvm_host_pmu_init(struct arm_pmu *pmu);
#else
#define kvm_host_pmu_init(x)	do { } while(0)
#endif

/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(int irq, int cpu);
void armpmu_free_irq(int irq, int cpu);
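
/*
 * Example (illustrative sketch only): the core probing code roughly
 * follows an allocate -> initialise -> register sequence using the
 * helpers above. Error handling is simplified and init_fn stands in for
 * the driver-specific armpmu_init_fn.
 *
 *	struct arm_pmu *pmu = armpmu_alloc();
 *	if (!pmu)
 *		return -ENOMEM;
 *
 *	ret = init_fn(pmu);		// driver fills in callbacks, name, etc.
 *	if (!ret)
 *		ret = armpmu_register(pmu);
 *	if (ret)
 *		armpmu_free(pmu);
 */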

#define ARMV8_PMU_PDEV_NAME "armv8-pmu"

#endif /* CONFIG_ARM_PMU */

#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"

#endif /* __ARM_PMU_H__ */