// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC DDRC uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* DDRC register definition */
#define DDRC_PERF_CTRL          0x010
#define DDRC_FLUX_WR            0x380
#define DDRC_FLUX_RD            0x384
#define DDRC_FLUX_WCMD          0x388
#define DDRC_FLUX_RCMD          0x38c
#define DDRC_PRE_CMD            0x3c0
#define DDRC_ACT_CMD            0x3c4
#define DDRC_RNK_CHG            0x3cc
#define DDRC_RW_CHG             0x3d0
#define DDRC_EVENT_CTRL         0x6C0
#define DDRC_INT_MASK           0x6c8
#define DDRC_INT_STATUS         0x6cc
#define DDRC_INT_CLEAR          0x6d0

/* DDRC has 8 counters */
#define DDRC_NR_COUNTERS        0x8
#define DDRC_PERF_CTRL_EN       0x2

/*
 * For the DDRC PMU, there are eight events and each event is mapped to a
 * fixed-purpose counter whose register offset is not consistent.
 * Therefore there is no need to write an event type, and we assume that
 * the event code (0 to 7) is equal to the counter index in the PMU driver.
 */
#define GET_DDRC_EVENTID(hwc)   (hwc->config_base & 0x7)

static const u32 ddrc_reg_off[] = {
        DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
        DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
};

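/*
 * Illustrative mapping only (derived from the definitions above, not part
 * of the original driver comments): event code 0x0 (flux_wr) is read from
 * DDRC_FLUX_WR at offset 0x380, and event code 0x5 (act_cmd) from
 * DDRC_ACT_CMD at offset 0x3c4; i.e. ddrc_reg_off[event code] gives the
 * fixed counter register for that event.
 */
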
/*
 * Select the counter register offset using the counter index.
 * In the DDRC there are no programmable counters; the count
 * is read from the statistics counter register itself.
 */
static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
{
        return ddrc_reg_off[cntr_idx];
}

static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
                                      struct hw_perf_event *hwc)
{
        /* Use event code as counter index */
        u32 idx = GET_DDRC_EVENTID(hwc);

        if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
                dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
                return 0;
        }

        return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
}

static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
                                        struct hw_perf_event *hwc, u64 val)
{
        u32 idx = GET_DDRC_EVENTID(hwc);

        if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
                dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
                return;
        }

        writel((u32)val,
               ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
}

/*
 * For the DDRC PMU, each event is mapped to a fixed-purpose counter by
 * hardware, so there is no need to write an event type.
 */
static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
                                       u32 type)
{
}

static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
{
        u32 val;

        /* Set perf_enable in DDRC_PERF_CTRL to start event counting */
        val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
        val |= DDRC_PERF_CTRL_EN;
        writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
{
        u32 val;

        /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
        val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
        val &= ~DDRC_PERF_CTRL_EN;
        writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
                                         struct hw_perf_event *hwc)
{
        u32 val;

        /* Set counter index (event code) in the DDRC_EVENT_CTRL register */
        val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
        val |= (1 << GET_DDRC_EVENTID(hwc));
        writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
                                          struct hw_perf_event *hwc)
{
        u32 val;

        /* Clear counter index (event code) in the DDRC_EVENT_CTRL register */
        val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
        val &= ~(1 << GET_DDRC_EVENTID(hwc));
        writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
{
        struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
        unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
        struct hw_perf_event *hwc = &event->hw;
        /* For DDRC PMU, we use event code as counter index */
        int idx = GET_DDRC_EVENTID(hwc);

        if (test_bit(idx, used_mask))
                return -EAGAIN;

        set_bit(idx, used_mask);

        return idx;
}

static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
                                             struct hw_perf_event *hwc)
{
        u32 val;

        /* Write 0 to enable interrupt */
        val = readl(ddrc_pmu->base + DDRC_INT_MASK);
        val &= ~(1 << GET_DDRC_EVENTID(hwc));
        writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
                                              struct hw_perf_event *hwc)
{
        u32 val;

        /* Write 1 to mask interrupt */
        val = readl(ddrc_pmu->base + DDRC_INT_MASK);
        val |= (1 << GET_DDRC_EVENTID(hwc));
        writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
{
        struct hisi_pmu *ddrc_pmu = dev_id;
        struct perf_event *event;
        unsigned long overflown;
        int idx;

        /* Read the DDRC_INT_STATUS register */
        overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
        if (!overflown)
                return IRQ_NONE;

        /*
         * Find the counter index which overflowed if the bit was set
         * and handle it
         */
        for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
                /* Write 1 to clear the IRQ status flag */
                writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);

                /* Get the corresponding event struct */
                event = ddrc_pmu->pmu_events.hw_events[idx];
                if (!event)
                        continue;

                hisi_uncore_pmu_event_update(event);
                hisi_uncore_pmu_set_event_period(event);
        }

        return IRQ_HANDLED;
}

static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
                                  struct platform_device *pdev)
{
        int irq, ret;

        /* Read and init IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
                               dev_name(&pdev->dev), ddrc_pmu);
        if (ret < 0) {
                dev_err(&pdev->dev,
                        "Fail to request IRQ:%d ret:%d\n", irq, ret);
                return ret;
        }

        ddrc_pmu->irq = irq;

        return 0;
}

static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
        { "HISI0233", },
        {},
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);

static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
                                   struct hisi_pmu *ddrc_pmu)
{
        struct resource *res;

        /*
         * Use the SCCL_ID and DDRC channel ID to identify the
         * DDRC PMU, where the SCCL_ID is in MPIDR[aff2].
         */
        if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
                                     &ddrc_pmu->index_id)) {
                dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
                return -EINVAL;
        }

        if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
                                     &ddrc_pmu->sccl_id)) {
                dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
                return -EINVAL;
        }
        /* DDRC PMUs only share the same SCCL */
        ddrc_pmu->ccl_id = -1;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ddrc_pmu->base)) {
                dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
                return PTR_ERR(ddrc_pmu->base);
        }

        return 0;
}

static struct attribute *hisi_ddrc_pmu_format_attr[] = {
        HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
        NULL,
};

static const struct attribute_group hisi_ddrc_pmu_format_group = {
        .name = "format",
        .attrs = hisi_ddrc_pmu_format_attr,
};

static struct attribute *hisi_ddrc_pmu_events_attr[] = {
        HISI_PMU_EVENT_ATTR(flux_wr,    0x00),
        HISI_PMU_EVENT_ATTR(flux_rd,    0x01),
        HISI_PMU_EVENT_ATTR(flux_wcmd,  0x02),
        HISI_PMU_EVENT_ATTR(flux_rcmd,  0x03),
        HISI_PMU_EVENT_ATTR(pre_cmd,    0x04),
        HISI_PMU_EVENT_ATTR(act_cmd,    0x05),
        HISI_PMU_EVENT_ATTR(rnk_chg,    0x06),
        HISI_PMU_EVENT_ATTR(rw_chg,     0x07),
        NULL,
};

static const struct attribute_group hisi_ddrc_pmu_events_group = {
        .name = "events",
        .attrs = hisi_ddrc_pmu_events_attr,
};

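/*
 * Illustrative usage sketch (not part of the original driver): once the
 * PMU is registered, the events above appear under
 * /sys/bus/event_source/devices/hisi_sccl<S>_ddrc<C>/events/, and the
 * "event" config field (config:0-4) carries the event code, so e.g.
 *
 *   perf stat -a -e hisi_sccl1_ddrc0/flux_rd/ sleep 1
 *   perf stat -a -e hisi_sccl1_ddrc0/event=0x4/ sleep 1
 *
 * count the flux_rd and pre_cmd events on SCCL 1, DDRC channel 0,
 * respectively. The SCCL and channel numbers in the PMU name are
 * platform dependent.
 */
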
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
        .attrs = hisi_ddrc_pmu_cpumask_attrs,
};

static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
        &hisi_ddrc_pmu_format_group,
        &hisi_ddrc_pmu_events_group,
        &hisi_ddrc_pmu_cpumask_attr_group,
        NULL,
};

static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
        .write_evtype           = hisi_ddrc_pmu_write_evtype,
        .get_event_idx          = hisi_ddrc_pmu_get_event_idx,
        .start_counters         = hisi_ddrc_pmu_start_counters,
        .stop_counters          = hisi_ddrc_pmu_stop_counters,
        .enable_counter         = hisi_ddrc_pmu_enable_counter,
        .disable_counter        = hisi_ddrc_pmu_disable_counter,
        .enable_counter_int     = hisi_ddrc_pmu_enable_counter_int,
        .disable_counter_int    = hisi_ddrc_pmu_disable_counter_int,
        .write_counter          = hisi_ddrc_pmu_write_counter,
        .read_counter           = hisi_ddrc_pmu_read_counter,
};

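/*
 * These callbacks are not called by perf core directly; they are consumed
 * by the shared hisi_uncore_pmu framework (see hisi_uncore_pmu.h), whose
 * generic hisi_uncore_pmu_{add,del,start,stop,read} handlers wired up in
 * the struct pmu below drive counting through these ops.
 */
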
static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
                                   struct hisi_pmu *ddrc_pmu)
{
        int ret;

        ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
        if (ret)
                return ret;

        ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
        if (ret)
                return ret;

        ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
        ddrc_pmu->counter_bits = 32;
        ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
        ddrc_pmu->dev = &pdev->dev;
        ddrc_pmu->on_cpu = -1;
        ddrc_pmu->check_event = 7;

        return 0;
}

static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
{
        struct hisi_pmu *ddrc_pmu;
        char *name;
        int ret;

        ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
        if (!ddrc_pmu)
                return -ENOMEM;

        platform_set_drvdata(pdev, ddrc_pmu);

        ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
        if (ret)
                return ret;

        ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
                                       &ddrc_pmu->node);
        if (ret) {
                dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
                return ret;
        }

        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
                              ddrc_pmu->sccl_id, ddrc_pmu->index_id);
        /* devm_kasprintf() may fail; unwind the hotplug instance if so */
        if (!name) {
                cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
                                            &ddrc_pmu->node);
                return -ENOMEM;
        }
        ddrc_pmu->pmu = (struct pmu) {
                .name           = name,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = hisi_uncore_pmu_event_init,
                .pmu_enable     = hisi_uncore_pmu_enable,
                .pmu_disable    = hisi_uncore_pmu_disable,
                .add            = hisi_uncore_pmu_add,
                .del            = hisi_uncore_pmu_del,
                .start          = hisi_uncore_pmu_start,
                .stop           = hisi_uncore_pmu_stop,
                .read           = hisi_uncore_pmu_read,
                .attr_groups    = hisi_ddrc_pmu_attr_groups,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        };

        ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
        if (ret) {
                dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
                cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
                                            &ddrc_pmu->node);
        }

        return ret;
}

static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
        struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);

        perf_pmu_unregister(&ddrc_pmu->pmu);
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
                                    &ddrc_pmu->node);

        return 0;
}

static struct platform_driver hisi_ddrc_pmu_driver = {
        .driver = {
                .name = "hisi_ddrc_pmu",
                .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
        },
        .probe = hisi_ddrc_pmu_probe,
        .remove = hisi_ddrc_pmu_remove,
};

static int __init hisi_ddrc_pmu_module_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
                                      "AP_PERF_ARM_HISI_DDRC_ONLINE",
                                      hisi_uncore_pmu_online_cpu,
                                      hisi_uncore_pmu_offline_cpu);
        if (ret) {
                pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
                return ret;
        }

        ret = platform_driver_register(&hisi_ddrc_pmu_driver);
        if (ret)
                cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);

        return ret;
}
module_init(hisi_ddrc_pmu_module_init);

static void __exit hisi_ddrc_pmu_module_exit(void)
{
        platform_driver_unregister(&hisi_ddrc_pmu_driver);
        cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
}
module_exit(hisi_ddrc_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");