/*
 * HiSilicon SoC DDRC uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* DDRC register definition */
#define DDRC_PERF_CTRL		0x010
#define DDRC_FLUX_WR		0x380
#define DDRC_FLUX_RD		0x384
#define DDRC_FLUX_WCMD		0x388
#define DDRC_FLUX_RCMD		0x38c
#define DDRC_PRE_CMD		0x3c0
#define DDRC_ACT_CMD		0x3c4
#define DDRC_BNK_CHG		0x3c8
#define DDRC_RNK_CHG		0x3cc
#define DDRC_EVENT_CTRL		0x6c0
#define DDRC_INT_MASK		0x6c8
#define DDRC_INT_STATUS		0x6cc
#define DDRC_INT_CLEAR		0x6d0

/* DDRC has 8 counters */
#define DDRC_NR_COUNTERS	0x8
#define DDRC_PERF_CTRL_EN	0x2

/*
 * For DDRC PMU, there are eight events and each event has been mapped
 * to a fixed-purpose counter whose register offset is not consistent.
 * Therefore there is no write event type and we assume that the event
 * code (0 to 7) is equal to the counter index in the PMU driver.
 */
#define GET_DDRC_EVENTID(hwc)	(hwc->config_base & 0x7)

static const u32 ddrc_reg_off[] = {
	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
};

/*
 * Select the counter register offset using the counter index.
 * In DDRC there are no programmable counters; the count is read
 * from the statistics counter register itself.
 */
static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
{
	return ddrc_reg_off[cntr_idx];
}

static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
				      struct hw_perf_event *hwc)
{
	/* Use event code as counter index */
	u32 idx = GET_DDRC_EVENTID(hwc);

	if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
		dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
		return 0;
	}

	return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
}

static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
					struct hw_perf_event *hwc, u64 val)
{
	u32 idx = GET_DDRC_EVENTID(hwc);

	if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
		dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
		return;
	}

	writel((u32)val,
	       ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
}

/*
 * For DDRC PMU, the event has been mapped to a fixed-purpose counter by
 * hardware, so there is no need to write an event type.
 */
static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
				       u32 type)
{
}

static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
	val |= DDRC_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
	val &= ~DDRC_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
					 struct hw_perf_event *hwc)
{
	u32 val;

	/* Set counter index (event code) in DDRC_EVENT_CTRL register */
	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
	val |= (1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
					  struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index (event code) in DDRC_EVENT_CTRL register */
	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
	val &= ~(1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
{
	struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
	unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
	struct hw_perf_event *hwc = &event->hw;
	/* For DDRC PMU, we use event code as counter index */
	int idx = GET_DDRC_EVENTID(hwc);

	if (test_bit(idx, used_mask))
		return -EAGAIN;

	set_bit(idx, used_mask);

	return idx;
}

static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable interrupt */
	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
	val &= ~(1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
					      struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask interrupt */
	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
	val |= (1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
{
	struct hisi_pmu *ddrc_pmu = dev_id;
	struct perf_event *event;
	unsigned long overflown;
	int idx;

	/* Read the DDRC_INT_STATUS register */
	overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
	if (!overflown)
		return IRQ_NONE;

	/*
	 * For each counter whose overflow bit is set, clear the interrupt
	 * and update the corresponding event.
	 */
	for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
		/* Write 1 to clear the IRQ status flag */
		writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);

		/* Get the corresponding event struct */
		event = ddrc_pmu->pmu_events.hw_events[idx];
		if (!event)
			continue;

		hisi_uncore_pmu_event_update(event);
		hisi_uncore_pmu_set_event_period(event);
	}

	return IRQ_HANDLED;
}

static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
				  struct platform_device *pdev)
{
	int irq, ret;

	/* Read and init IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       dev_name(&pdev->dev), ddrc_pmu);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Fail to request IRQ:%d ret:%d\n", irq, ret);
		return ret;
	}

	ddrc_pmu->irq = irq;

	return 0;
}

static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
	{ "HISI0233", },
	{},
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);

static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
				   struct hisi_pmu *ddrc_pmu)
{
	struct resource *res;

	/*
	 * Use the SCCL_ID and DDRC channel ID to identify the
	 * DDRC PMU; the SCCL_ID is in MPIDR[aff2].
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
				     &ddrc_pmu->index_id)) {
		dev_err(&pdev->dev, "Cannot read ddrc channel-id!\n");
		return -EINVAL;
	}

	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &ddrc_pmu->sccl_id)) {
		dev_err(&pdev->dev, "Cannot read ddrc sccl-id!\n");
		return -EINVAL;
	}
	/* DDRC PMUs only share the same SCCL */
	ddrc_pmu->ccl_id = -1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ddrc_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
		return PTR_ERR(ddrc_pmu->base);
	}

	return 0;
}

static struct attribute *hisi_ddrc_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_format_attr,
};
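
/*
 * Note: the "event" format field spans config:0-4, but only event codes
 * 0 to 7 are valid for the DDRC PMU; GET_DDRC_EVENTID() masks the code
 * with 0x7 and check_event is set to 7 in hisi_ddrc_pmu_dev_probe().
 */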

static struct attribute *hisi_ddrc_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(flux_wr,		0x00),
	HISI_PMU_EVENT_ATTR(flux_rd,		0x01),
	HISI_PMU_EVENT_ATTR(flux_wcmd,		0x02),
	HISI_PMU_EVENT_ATTR(flux_rcmd,		0x03),
	HISI_PMU_EVENT_ATTR(pre_cmd,		0x04),
	HISI_PMU_EVENT_ATTR(act_cmd,		0x05),
	HISI_PMU_EVENT_ATTR(rnk_chg,		0x06),
	HISI_PMU_EVENT_ATTR(rw_chg,		0x07),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_events_attr,
};
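
/*
 * These events are exposed via sysfs and counted with perf in system-wide
 * mode (the PMU uses perf_invalid_context). For example, with an
 * illustrative PMU instance name (the actual sccl/channel numbers depend
 * on the platform):
 *
 *   perf stat -a -e hisi_sccl1_ddrc0/flux_wr/ sleep 1
 *
 * counts DDRC write flux on that channel for one second.
 */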

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
	.attrs = hisi_ddrc_pmu_cpumask_attrs,
};

static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
	&hisi_ddrc_pmu_format_group,
	&hisi_ddrc_pmu_events_group,
	&hisi_ddrc_pmu_cpumask_attr_group,
	NULL,
};

static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
	.write_evtype		= hisi_ddrc_pmu_write_evtype,
	.get_event_idx		= hisi_ddrc_pmu_get_event_idx,
	.start_counters		= hisi_ddrc_pmu_start_counters,
	.stop_counters		= hisi_ddrc_pmu_stop_counters,
	.enable_counter		= hisi_ddrc_pmu_enable_counter,
	.disable_counter	= hisi_ddrc_pmu_disable_counter,
	.enable_counter_int	= hisi_ddrc_pmu_enable_counter_int,
	.disable_counter_int	= hisi_ddrc_pmu_disable_counter_int,
	.write_counter		= hisi_ddrc_pmu_write_counter,
	.read_counter		= hisi_ddrc_pmu_read_counter,
};

static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
				   struct hisi_pmu *ddrc_pmu)
{
	int ret;

	ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
	if (ret)
		return ret;

	ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
	if (ret)
		return ret;

	ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
	ddrc_pmu->counter_bits = 32;
	ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
	ddrc_pmu->dev = &pdev->dev;
	ddrc_pmu->on_cpu = -1;
	/* Event codes 0 to 7 are valid for the DDRC PMU */
	ddrc_pmu->check_event = 7;

	return 0;
}

static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu;
	char *name;
	int ret;

	ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
	if (!ddrc_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, ddrc_pmu);

	ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				       &ddrc_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
		return ret;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
			      ddrc_pmu->sccl_id, ddrc_pmu->index_id);
	if (!name) {
		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
					    &ddrc_pmu->node);
		return -ENOMEM;
	}

	ddrc_pmu->pmu = (struct pmu) {
		.name		= name,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= hisi_uncore_pmu_event_init,
		.pmu_enable	= hisi_uncore_pmu_enable,
		.pmu_disable	= hisi_uncore_pmu_disable,
		.add		= hisi_uncore_pmu_add,
		.del		= hisi_uncore_pmu_del,
		.start		= hisi_uncore_pmu_start,
		.stop		= hisi_uncore_pmu_stop,
		.read		= hisi_uncore_pmu_read,
		.attr_groups	= hisi_ddrc_pmu_attr_groups,
	};

	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
	if (ret) {
		dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
					    &ddrc_pmu->node);
	}

	return ret;
}

static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&ddrc_pmu->pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				    &ddrc_pmu->node);

	return 0;
}

static struct platform_driver hisi_ddrc_pmu_driver = {
	.driver = {
		.name = "hisi_ddrc_pmu",
		.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
	},
	.probe = hisi_ddrc_pmu_probe,
	.remove = hisi_ddrc_pmu_remove,
};

static int __init hisi_ddrc_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				      "AP_PERF_ARM_HISI_DDRC_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_ddrc_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);

	return ret;
}
module_init(hisi_ddrc_pmu_module_init);

static void __exit hisi_ddrc_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_ddrc_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
}
module_exit(hisi_ddrc_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");