// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC UC (unified cache) uncore Hardware event counters support
 *
 * Copyright (C) 2023 HiSilicon Limited
 *
 * This code is based on the uncore PMUs like hisi_uncore_l3c_pmu.
 */
9 #include <linux/cpuhotplug.h>
10 #include <linux/interrupt.h>
11 #include <linux/irq.h>
12 #include <linux/list.h>
13 #include <linux/mod_devicetable.h>
14 #include <linux/property.h>
15
16 #include "hisi_uncore_pmu.h"
17
18 /* Dynamic CPU hotplug state used by UC PMU */
19 static enum cpuhp_state hisi_uc_pmu_online;
20
21 /* UC register definition */
22 #define HISI_UC_INT_MASK_REG 0x0800
23 #define HISI_UC_INT_STS_REG 0x0808
24 #define HISI_UC_INT_CLEAR_REG 0x080c
25 #define HISI_UC_TRACETAG_CTRL_REG 0x1b2c
26 #define HISI_UC_TRACETAG_REQ_MSK GENMASK(9, 7)
27 #define HISI_UC_TRACETAG_MARK_EN BIT(0)
28 #define HISI_UC_TRACETAG_REQ_EN (HISI_UC_TRACETAG_MARK_EN | BIT(2))
29 #define HISI_UC_TRACETAG_SRCID_EN BIT(3)
30 #define HISI_UC_SRCID_CTRL_REG 0x1b40
31 #define HISI_UC_SRCID_MSK GENMASK(14, 1)
32 #define HISI_UC_EVENT_CTRL_REG 0x1c00
33 #define HISI_UC_EVENT_TRACETAG_EN BIT(29)
34 #define HISI_UC_EVENT_URING_MSK GENMASK(28, 27)
35 #define HISI_UC_EVENT_GLB_EN BIT(26)
36 #define HISI_UC_VERSION_REG 0x1cf0
37 #define HISI_UC_EVTYPE_REGn(n) (0x1d00 + (n) * 4)
38 #define HISI_UC_EVTYPE_MASK GENMASK(7, 0)
39 #define HISI_UC_CNTR_REGn(n) (0x1e00 + (n) * 8)
40
41 #define HISI_UC_NR_COUNTERS 0x8
42 #define HISI_UC_V2_NR_EVENTS 0xFF
43 #define HISI_UC_CNTR_REG_BITS 64
44
45 #define HISI_UC_RD_REQ_TRACETAG 0x4
46 #define HISI_UC_URING_EVENT_MIN 0x47
47 #define HISI_UC_URING_EVENT_MAX 0x59
48
49 HISI_PMU_EVENT_ATTR_EXTRACTOR(rd_req_en, config1, 0, 0);
50 HISI_PMU_EVENT_ATTR_EXTRACTOR(uring_channel, config1, 5, 4);
51 HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid, config1, 19, 6);
52 HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_en, config1, 20, 20);
53
hisi_uc_pmu_check_filter(struct perf_event * event)54 static int hisi_uc_pmu_check_filter(struct perf_event *event)
55 {
56 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
57
58 if (hisi_get_srcid_en(event) && !hisi_get_rd_req_en(event)) {
59 dev_err(uc_pmu->dev,
60 "rcid_en depends on rd_req_en being enabled!\n");
61 return -EINVAL;
62 }
63
64 if (!hisi_get_uring_channel(event))
65 return 0;
66
67 if ((HISI_GET_EVENTID(event) < HISI_UC_URING_EVENT_MIN) ||
68 (HISI_GET_EVENTID(event) > HISI_UC_URING_EVENT_MAX))
69 dev_warn(uc_pmu->dev,
70 "Only events: [%#x ~ %#x] support channel filtering!",
71 HISI_UC_URING_EVENT_MIN, HISI_UC_URING_EVENT_MAX);
72
73 return 0;
74 }
75
hisi_uc_pmu_config_req_tracetag(struct perf_event * event)76 static void hisi_uc_pmu_config_req_tracetag(struct perf_event *event)
77 {
78 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
79 u32 val;
80
81 if (!hisi_get_rd_req_en(event))
82 return;
83
84 val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
85
86 /* The request-type has been configured */
87 if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == HISI_UC_RD_REQ_TRACETAG)
88 return;
89
90 /* Set request-type for tracetag, only read request is supported! */
91 val &= ~HISI_UC_TRACETAG_REQ_MSK;
92 val |= FIELD_PREP(HISI_UC_TRACETAG_REQ_MSK, HISI_UC_RD_REQ_TRACETAG);
93 val |= HISI_UC_TRACETAG_REQ_EN;
94 writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
95 }
96
hisi_uc_pmu_clear_req_tracetag(struct perf_event * event)97 static void hisi_uc_pmu_clear_req_tracetag(struct perf_event *event)
98 {
99 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
100 u32 val;
101
102 if (!hisi_get_rd_req_en(event))
103 return;
104
105 val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
106
107 /* Do nothing, the request-type tracetag has been cleaned up */
108 if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == 0)
109 return;
110
111 /* Clear request-type */
112 val &= ~HISI_UC_TRACETAG_REQ_MSK;
113 val &= ~HISI_UC_TRACETAG_REQ_EN;
114 writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
115 }
116
hisi_uc_pmu_config_srcid_tracetag(struct perf_event * event)117 static void hisi_uc_pmu_config_srcid_tracetag(struct perf_event *event)
118 {
119 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
120 u32 val;
121
122 if (!hisi_get_srcid_en(event))
123 return;
124
125 val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
126
127 /* Do nothing, the source id has been configured */
128 if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val))
129 return;
130
131 /* Enable source id tracetag */
132 val |= HISI_UC_TRACETAG_SRCID_EN;
133 writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
134
135 val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
136 val &= ~HISI_UC_SRCID_MSK;
137 val |= FIELD_PREP(HISI_UC_SRCID_MSK, hisi_get_srcid(event));
138 writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
139
140 /* Depend on request-type tracetag enabled */
141 hisi_uc_pmu_config_req_tracetag(event);
142 }
143
hisi_uc_pmu_clear_srcid_tracetag(struct perf_event * event)144 static void hisi_uc_pmu_clear_srcid_tracetag(struct perf_event *event)
145 {
146 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
147 u32 val;
148
149 if (!hisi_get_srcid_en(event))
150 return;
151
152 val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
153
154 /* Do nothing, the source id has been cleaned up */
155 if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val) == 0)
156 return;
157
158 hisi_uc_pmu_clear_req_tracetag(event);
159
160 /* Disable source id tracetag */
161 val &= ~HISI_UC_TRACETAG_SRCID_EN;
162 writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
163
164 val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
165 val &= ~HISI_UC_SRCID_MSK;
166 writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
167 }
168
hisi_uc_pmu_config_uring_channel(struct perf_event * event)169 static void hisi_uc_pmu_config_uring_channel(struct perf_event *event)
170 {
171 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
172 u32 uring_channel = hisi_get_uring_channel(event);
173 u32 val;
174
175 /* Do nothing if not being set or is set explicitly to zero (default) */
176 if (uring_channel == 0)
177 return;
178
179 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
180
181 /* Do nothing, the uring_channel has been configured */
182 if (uring_channel == FIELD_GET(HISI_UC_EVENT_URING_MSK, val))
183 return;
184
185 val &= ~HISI_UC_EVENT_URING_MSK;
186 val |= FIELD_PREP(HISI_UC_EVENT_URING_MSK, uring_channel);
187 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
188 }
189
hisi_uc_pmu_clear_uring_channel(struct perf_event * event)190 static void hisi_uc_pmu_clear_uring_channel(struct perf_event *event)
191 {
192 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
193 u32 val;
194
195 /* Do nothing if not being set or is set explicitly to zero (default) */
196 if (hisi_get_uring_channel(event) == 0)
197 return;
198
199 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
200
201 /* Do nothing, the uring_channel has been cleaned up */
202 if (FIELD_GET(HISI_UC_EVENT_URING_MSK, val) == 0)
203 return;
204
205 val &= ~HISI_UC_EVENT_URING_MSK;
206 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
207 }
208
hisi_uc_pmu_enable_filter(struct perf_event * event)209 static void hisi_uc_pmu_enable_filter(struct perf_event *event)
210 {
211 if (event->attr.config1 == 0)
212 return;
213
214 hisi_uc_pmu_config_uring_channel(event);
215 hisi_uc_pmu_config_req_tracetag(event);
216 hisi_uc_pmu_config_srcid_tracetag(event);
217 }
218
hisi_uc_pmu_disable_filter(struct perf_event * event)219 static void hisi_uc_pmu_disable_filter(struct perf_event *event)
220 {
221 if (event->attr.config1 == 0)
222 return;
223
224 hisi_uc_pmu_clear_srcid_tracetag(event);
225 hisi_uc_pmu_clear_req_tracetag(event);
226 hisi_uc_pmu_clear_uring_channel(event);
227 }
228
/* Program the 8-bit event code for hardware counter @idx. */
static void hisi_uc_pmu_write_evtype(struct hisi_pmu *uc_pmu, int idx, u32 type)
{
	u32 val;

	/*
	 * Select the appropriate event select register.
	 * There are 2 32-bit event select registers for the
	 * 8 hardware counters, each event code is 8-bit wide.
	 */
	val = readl(uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
	val &= ~(HISI_UC_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
	val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
	writel(val, uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
}
243
hisi_uc_pmu_start_counters(struct hisi_pmu * uc_pmu)244 static void hisi_uc_pmu_start_counters(struct hisi_pmu *uc_pmu)
245 {
246 u32 val;
247
248 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
249 val |= HISI_UC_EVENT_GLB_EN;
250 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
251 }
252
hisi_uc_pmu_stop_counters(struct hisi_pmu * uc_pmu)253 static void hisi_uc_pmu_stop_counters(struct hisi_pmu *uc_pmu)
254 {
255 u32 val;
256
257 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
258 val &= ~HISI_UC_EVENT_GLB_EN;
259 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
260 }
261
hisi_uc_pmu_enable_counter(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)262 static void hisi_uc_pmu_enable_counter(struct hisi_pmu *uc_pmu,
263 struct hw_perf_event *hwc)
264 {
265 u32 val;
266
267 /* Enable counter index */
268 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
269 val |= (1 << hwc->idx);
270 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
271 }
272
hisi_uc_pmu_disable_counter(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)273 static void hisi_uc_pmu_disable_counter(struct hisi_pmu *uc_pmu,
274 struct hw_perf_event *hwc)
275 {
276 u32 val;
277
278 /* Clear counter index */
279 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
280 val &= ~(1 << hwc->idx);
281 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
282 }
283
hisi_uc_pmu_read_counter(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)284 static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
285 struct hw_perf_event *hwc)
286 {
287 return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
288 }
289
/* Write a 64-bit value into the counter selected by hwc->idx. */
static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
	writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
295
hisi_uc_pmu_enable_counter_int(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)296 static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
297 struct hw_perf_event *hwc)
298 {
299 u32 val;
300
301 val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
302 val &= ~(1 << hwc->idx);
303 writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
304 }
305
hisi_uc_pmu_disable_counter_int(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)306 static void hisi_uc_pmu_disable_counter_int(struct hisi_pmu *uc_pmu,
307 struct hw_perf_event *hwc)
308 {
309 u32 val;
310
311 val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
312 val |= (1 << hwc->idx);
313 writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
314 }
315
hisi_uc_pmu_get_int_status(struct hisi_pmu * uc_pmu)316 static u32 hisi_uc_pmu_get_int_status(struct hisi_pmu *uc_pmu)
317 {
318 return readl(uc_pmu->base + HISI_UC_INT_STS_REG);
319 }
320
hisi_uc_pmu_clear_int_status(struct hisi_pmu * uc_pmu,int idx)321 static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
322 {
323 writel(1 << idx, uc_pmu->base + HISI_UC_INT_CLEAR_REG);
324 }
325
hisi_uc_pmu_init_data(struct platform_device * pdev,struct hisi_pmu * uc_pmu)326 static int hisi_uc_pmu_init_data(struct platform_device *pdev,
327 struct hisi_pmu *uc_pmu)
328 {
329 /*
330 * Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
331 * identify the topology information of UC PMU devices in the chip.
332 * They have some CCLs per SCCL and then 4 UC PMU per CCL.
333 */
334 if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
335 &uc_pmu->sccl_id)) {
336 dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
337 return -EINVAL;
338 }
339
340 if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
341 &uc_pmu->ccl_id)) {
342 dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
343 return -EINVAL;
344 }
345
346 if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
347 &uc_pmu->sub_id)) {
348 dev_err(&pdev->dev, "Can not read sub-id!\n");
349 return -EINVAL;
350 }
351
352 uc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
353 if (IS_ERR(uc_pmu->base)) {
354 dev_err(&pdev->dev, "ioremap failed for uc_pmu resource\n");
355 return PTR_ERR(uc_pmu->base);
356 }
357
358 uc_pmu->identifier = readl(uc_pmu->base + HISI_UC_VERSION_REG);
359
360 return 0;
361 }
362
363 static struct attribute *hisi_uc_pmu_format_attr[] = {
364 HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
365 HISI_PMU_FORMAT_ATTR(rd_req_en, "config1:0-0"),
366 HISI_PMU_FORMAT_ATTR(uring_channel, "config1:4-5"),
367 HISI_PMU_FORMAT_ATTR(srcid, "config1:6-19"),
368 HISI_PMU_FORMAT_ATTR(srcid_en, "config1:20-20"),
369 NULL
370 };
371
372 static const struct attribute_group hisi_uc_pmu_format_group = {
373 .name = "format",
374 .attrs = hisi_uc_pmu_format_attr,
375 };
376
377 static struct attribute *hisi_uc_pmu_events_attr[] = {
378 HISI_PMU_EVENT_ATTR(sq_time, 0x00),
379 HISI_PMU_EVENT_ATTR(pq_time, 0x01),
380 HISI_PMU_EVENT_ATTR(hbm_time, 0x02),
381 HISI_PMU_EVENT_ATTR(iq_comp_time_cring, 0x03),
382 HISI_PMU_EVENT_ATTR(iq_comp_time_uring, 0x05),
383 HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
384 HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
385 HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
386 HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a),
387 HISI_PMU_EVENT_ATTR(cycles, 0x9c),
388 HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
389 HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
390 HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
391 HISI_PMU_EVENT_ATTR(cring_txdat_cnt, 0xfb),
392 HISI_PMU_EVENT_ATTR(uring_rxdat_cnt, 0xfc),
393 HISI_PMU_EVENT_ATTR(uring_txdat_cnt, 0xfd),
394 NULL
395 };
396
397 static const struct attribute_group hisi_uc_pmu_events_group = {
398 .name = "events",
399 .attrs = hisi_uc_pmu_events_attr,
400 };
401
402 static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
403
404 static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
405 &dev_attr_cpumask.attr,
406 NULL,
407 };
408
409 static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
410 .attrs = hisi_uc_pmu_cpumask_attrs,
411 };
412
413 static struct device_attribute hisi_uc_pmu_identifier_attr =
414 __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
415
416 static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
417 &hisi_uc_pmu_identifier_attr.attr,
418 NULL
419 };
420
421 static const struct attribute_group hisi_uc_pmu_identifier_group = {
422 .attrs = hisi_uc_pmu_identifier_attrs,
423 };
424
425 static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
426 &hisi_uc_pmu_format_group,
427 &hisi_uc_pmu_events_group,
428 &hisi_uc_pmu_cpumask_attr_group,
429 &hisi_uc_pmu_identifier_group,
430 NULL
431 };
432
433 static const struct hisi_uncore_ops hisi_uncore_uc_pmu_ops = {
434 .check_filter = hisi_uc_pmu_check_filter,
435 .write_evtype = hisi_uc_pmu_write_evtype,
436 .get_event_idx = hisi_uncore_pmu_get_event_idx,
437 .start_counters = hisi_uc_pmu_start_counters,
438 .stop_counters = hisi_uc_pmu_stop_counters,
439 .enable_counter = hisi_uc_pmu_enable_counter,
440 .disable_counter = hisi_uc_pmu_disable_counter,
441 .enable_counter_int = hisi_uc_pmu_enable_counter_int,
442 .disable_counter_int = hisi_uc_pmu_disable_counter_int,
443 .write_counter = hisi_uc_pmu_write_counter,
444 .read_counter = hisi_uc_pmu_read_counter,
445 .get_int_status = hisi_uc_pmu_get_int_status,
446 .clear_int_status = hisi_uc_pmu_clear_int_status,
447 .enable_filter = hisi_uc_pmu_enable_filter,
448 .disable_filter = hisi_uc_pmu_disable_filter,
449 };
450
hisi_uc_pmu_dev_probe(struct platform_device * pdev,struct hisi_pmu * uc_pmu)451 static int hisi_uc_pmu_dev_probe(struct platform_device *pdev,
452 struct hisi_pmu *uc_pmu)
453 {
454 int ret;
455
456 ret = hisi_uc_pmu_init_data(pdev, uc_pmu);
457 if (ret)
458 return ret;
459
460 ret = hisi_uncore_pmu_init_irq(uc_pmu, pdev);
461 if (ret)
462 return ret;
463
464 uc_pmu->pmu_events.attr_groups = hisi_uc_pmu_attr_groups;
465 uc_pmu->check_event = HISI_UC_EVTYPE_MASK;
466 uc_pmu->ops = &hisi_uncore_uc_pmu_ops;
467 uc_pmu->counter_bits = HISI_UC_CNTR_REG_BITS;
468 uc_pmu->num_counters = HISI_UC_NR_COUNTERS;
469 uc_pmu->dev = &pdev->dev;
470 uc_pmu->on_cpu = -1;
471
472 return 0;
473 }
474
hisi_uc_pmu_remove_cpuhp_instance(void * hotplug_node)475 static void hisi_uc_pmu_remove_cpuhp_instance(void *hotplug_node)
476 {
477 cpuhp_state_remove_instance_nocalls(hisi_uc_pmu_online, hotplug_node);
478 }
479
hisi_uc_pmu_unregister_pmu(void * pmu)480 static void hisi_uc_pmu_unregister_pmu(void *pmu)
481 {
482 perf_pmu_unregister(pmu);
483 }
484
hisi_uc_pmu_probe(struct platform_device * pdev)485 static int hisi_uc_pmu_probe(struct platform_device *pdev)
486 {
487 struct hisi_pmu *uc_pmu;
488 char *name;
489 int ret;
490
491 uc_pmu = devm_kzalloc(&pdev->dev, sizeof(*uc_pmu), GFP_KERNEL);
492 if (!uc_pmu)
493 return -ENOMEM;
494
495 platform_set_drvdata(pdev, uc_pmu);
496
497 ret = hisi_uc_pmu_dev_probe(pdev, uc_pmu);
498 if (ret)
499 return ret;
500
501 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
502 uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
503 if (!name)
504 return -ENOMEM;
505
506 ret = cpuhp_state_add_instance(hisi_uc_pmu_online, &uc_pmu->node);
507 if (ret)
508 return dev_err_probe(&pdev->dev, ret, "Error registering hotplug\n");
509
510 ret = devm_add_action_or_reset(&pdev->dev,
511 hisi_uc_pmu_remove_cpuhp_instance,
512 &uc_pmu->node);
513 if (ret)
514 return ret;
515
516 hisi_pmu_init(uc_pmu, THIS_MODULE);
517
518 ret = perf_pmu_register(&uc_pmu->pmu, name, -1);
519 if (ret)
520 return ret;
521
522 return devm_add_action_or_reset(&pdev->dev,
523 hisi_uc_pmu_unregister_pmu,
524 &uc_pmu->pmu);
525 }
526
527 static const struct acpi_device_id hisi_uc_pmu_acpi_match[] = {
528 { "HISI0291", },
529 {}
530 };
531 MODULE_DEVICE_TABLE(acpi, hisi_uc_pmu_acpi_match);
532
533 static struct platform_driver hisi_uc_pmu_driver = {
534 .driver = {
535 .name = "hisi_uc_pmu",
536 .acpi_match_table = hisi_uc_pmu_acpi_match,
537 /*
538 * We have not worked out a safe bind/unbind process,
539 * Forcefully unbinding during sampling will lead to a
540 * kernel panic, so this is not supported yet.
541 */
542 .suppress_bind_attrs = true,
543 },
544 .probe = hisi_uc_pmu_probe,
545 };
546
hisi_uc_pmu_module_init(void)547 static int __init hisi_uc_pmu_module_init(void)
548 {
549 int ret;
550
551 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
552 "perf/hisi/uc:online",
553 hisi_uncore_pmu_online_cpu,
554 hisi_uncore_pmu_offline_cpu);
555 if (ret < 0) {
556 pr_err("UC PMU: Error setup hotplug, ret = %d\n", ret);
557 return ret;
558 }
559 hisi_uc_pmu_online = ret;
560
561 ret = platform_driver_register(&hisi_uc_pmu_driver);
562 if (ret)
563 cpuhp_remove_multi_state(hisi_uc_pmu_online);
564
565 return ret;
566 }
567 module_init(hisi_uc_pmu_module_init);
568
hisi_uc_pmu_module_exit(void)569 static void __exit hisi_uc_pmu_module_exit(void)
570 {
571 platform_driver_unregister(&hisi_uc_pmu_driver);
572 cpuhp_remove_multi_state(hisi_uc_pmu_online);
573 }
574 module_exit(hisi_uc_pmu_module_exit);
575
576 MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
577 MODULE_LICENSE("GPL");
578 MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
579