1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Synopsys DDR ECC Driver
4 * This driver is based on ppc4xx_edac.c drivers
5 *
6 * Copyright (C) 2012 - 2014 Xilinx, Inc.
7 */
8
9 #include <linux/edac.h>
10 #include <linux/module.h>
11 #include <linux/platform_device.h>
12 #include <linux/interrupt.h>
13 #include <linux/of.h>
14 #include <linux/of_device.h>
15
16 #include "edac_module.h"
17
18 /* Number of cs_rows needed per memory controller */
19 #define SYNPS_EDAC_NR_CSROWS 1
20
21 /* Number of channels per memory controller */
22 #define SYNPS_EDAC_NR_CHANS 1
23
24 /* Granularity of reported error in bytes */
25 #define SYNPS_EDAC_ERR_GRAIN 1
26
27 #define SYNPS_EDAC_MSG_SIZE 256
28
29 #define SYNPS_EDAC_MOD_STRING "synps_edac"
30 #define SYNPS_EDAC_MOD_VER "1"
31
32 /* Synopsys DDR memory controller registers that are relevant to ECC */
33 #define CTRL_OFST 0x0
34 #define T_ZQ_OFST 0xA4
35
36 /* ECC control register */
37 #define ECC_CTRL_OFST 0xC4
38 /* ECC log register */
39 #define CE_LOG_OFST 0xC8
40 /* ECC address register */
41 #define CE_ADDR_OFST 0xCC
42 /* ECC data[31:0] register */
43 #define CE_DATA_31_0_OFST 0xD0
44
45 /* Uncorrectable error info registers */
46 #define UE_LOG_OFST 0xDC
47 #define UE_ADDR_OFST 0xE0
48 #define UE_DATA_31_0_OFST 0xE4
49
50 #define STAT_OFST 0xF0
51 #define SCRUB_OFST 0xF4
52
53 /* Control register bit field definitions */
54 #define CTRL_BW_MASK 0xC
55 #define CTRL_BW_SHIFT 2
56
57 #define DDRCTL_WDTH_16 1
58 #define DDRCTL_WDTH_32 0
59
60 /* ZQ register bit field definitions */
61 #define T_ZQ_DDRMODE_MASK 0x2
62
63 /* ECC control register bit field definitions */
64 #define ECC_CTRL_CLR_CE_ERR 0x2
65 #define ECC_CTRL_CLR_UE_ERR 0x1
66
67 /* ECC correctable/uncorrectable error log register definitions */
68 #define LOG_VALID 0x1
69 #define CE_LOG_BITPOS_MASK 0xFE
70 #define CE_LOG_BITPOS_SHIFT 1
71
72 /* ECC correctable/uncorrectable error address register definitions */
73 #define ADDR_COL_MASK 0xFFF
74 #define ADDR_ROW_MASK 0xFFFF000
75 #define ADDR_ROW_SHIFT 12
76 #define ADDR_BANK_MASK 0x70000000
77 #define ADDR_BANK_SHIFT 28
78
79 /* ECC statistic register definitions */
80 #define STAT_UECNT_MASK 0xFF
81 #define STAT_CECNT_MASK 0xFF00
82 #define STAT_CECNT_SHIFT 8
83
84 /* ECC scrub register definitions */
85 #define SCRUB_MODE_MASK 0x7
86 #define SCRUB_MODE_SECDED 0x4
87
88 /* DDR ECC Quirks */
89 #define DDR_ECC_INTR_SUPPORT BIT(0)
90 #define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
91 #define DDR_ECC_INTR_SELF_CLEAR BIT(2)
92
93 /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
94 /* ECC Configuration Registers */
95 #define ECC_CFG0_OFST 0x70
96 #define ECC_CFG1_OFST 0x74
97
98 /* ECC Status Register */
99 #define ECC_STAT_OFST 0x78
100
101 /* ECC Clear Register */
102 #define ECC_CLR_OFST 0x7C
103
104 /* ECC Error count Register */
105 #define ECC_ERRCNT_OFST 0x80
106
107 /* ECC Corrected Error Address Register */
108 #define ECC_CEADDR0_OFST 0x84
109 #define ECC_CEADDR1_OFST 0x88
110
111 /* ECC Syndrome Registers */
112 #define ECC_CSYND0_OFST 0x8C
113 #define ECC_CSYND1_OFST 0x90
114 #define ECC_CSYND2_OFST 0x94
115
116 /* ECC Bit Mask0 Address Register */
117 #define ECC_BITMASK0_OFST 0x98
118 #define ECC_BITMASK1_OFST 0x9C
119 #define ECC_BITMASK2_OFST 0xA0
120
121 /* ECC UnCorrected Error Address Register */
122 #define ECC_UEADDR0_OFST 0xA4
123 #define ECC_UEADDR1_OFST 0xA8
124
125 /* ECC Syndrome Registers */
126 #define ECC_UESYND0_OFST 0xAC
127 #define ECC_UESYND1_OFST 0xB0
128 #define ECC_UESYND2_OFST 0xB4
129
130 /* ECC Poison Address Reg */
131 #define ECC_POISON0_OFST 0xB8
132 #define ECC_POISON1_OFST 0xBC
133
134 #define ECC_ADDRMAP0_OFFSET 0x200
135
136 /* Control register bitfield definitions */
137 #define ECC_CTRL_BUSWIDTH_MASK 0x3000
138 #define ECC_CTRL_BUSWIDTH_SHIFT 12
139 #define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
140 #define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
141
142 /* DDR Control Register width definitions */
143 #define DDRCTL_EWDTH_16 2
144 #define DDRCTL_EWDTH_32 1
145 #define DDRCTL_EWDTH_64 0
146
147 /* ECC status register definitions */
148 #define ECC_STAT_UECNT_MASK 0xF0000
149 #define ECC_STAT_UECNT_SHIFT 16
150 #define ECC_STAT_CECNT_MASK 0xF00
151 #define ECC_STAT_CECNT_SHIFT 8
152 #define ECC_STAT_BITNUM_MASK 0x7F
153
154 /* ECC error count register definitions */
155 #define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
156 #define ECC_ERRCNT_UECNT_SHIFT 16
157 #define ECC_ERRCNT_CECNT_MASK 0xFFFF
158
159 /* DDR QOS Interrupt register definitions */
160 #define DDR_QOS_IRQ_STAT_OFST 0x20200
161 #define DDR_QOSUE_MASK 0x4
162 #define DDR_QOSCE_MASK 0x2
163 #define ECC_CE_UE_INTR_MASK 0x6
164 #define DDR_QOS_IRQ_EN_OFST 0x20208
165 #define DDR_QOS_IRQ_DB_OFST 0x2020C
166
167 /* DDR QOS Interrupt register definitions */
168 #define DDR_UE_MASK BIT(9)
169 #define DDR_CE_MASK BIT(8)
170
171 /* ECC Corrected Error Register Mask and Shifts*/
172 #define ECC_CEADDR0_RW_MASK 0x3FFFF
173 #define ECC_CEADDR0_RNK_MASK BIT(24)
174 #define ECC_CEADDR1_BNKGRP_MASK 0x3000000
175 #define ECC_CEADDR1_BNKNR_MASK 0x70000
176 #define ECC_CEADDR1_BLKNR_MASK 0xFFF
177 #define ECC_CEADDR1_BNKGRP_SHIFT 24
178 #define ECC_CEADDR1_BNKNR_SHIFT 16
179
180 /* ECC Poison register shifts */
181 #define ECC_POISON0_RANK_SHIFT 24
182 #define ECC_POISON0_RANK_MASK BIT(24)
183 #define ECC_POISON0_COLUMN_SHIFT 0
184 #define ECC_POISON0_COLUMN_MASK 0xFFF
185 #define ECC_POISON1_BG_SHIFT 28
186 #define ECC_POISON1_BG_MASK 0x30000000
187 #define ECC_POISON1_BANKNR_SHIFT 24
188 #define ECC_POISON1_BANKNR_MASK 0x7000000
189 #define ECC_POISON1_ROW_SHIFT 0
190 #define ECC_POISON1_ROW_MASK 0x3FFFF
191
192 /* DDR Memory type defines */
193 #define MEM_TYPE_DDR3 0x1
194 #define MEM_TYPE_LPDDR3 0x8
195 #define MEM_TYPE_DDR2 0x4
196 #define MEM_TYPE_DDR4 0x10
197 #define MEM_TYPE_LPDDR4 0x20
198
199 /* DDRC Software control register */
200 #define DDRC_SWCTL 0x320
201
202 /* DDRC ECC CE & UE poison mask */
203 #define ECC_CEPOISON_MASK 0x3
204 #define ECC_UEPOISON_MASK 0x1
205
206 /* DDRC Device config masks */
207 #define DDRC_MSTR_CFG_MASK 0xC0000000
208 #define DDRC_MSTR_CFG_SHIFT 30
209 #define DDRC_MSTR_CFG_X4_MASK 0x0
210 #define DDRC_MSTR_CFG_X8_MASK 0x1
211 #define DDRC_MSTR_CFG_X16_MASK 0x2
212 #define DDRC_MSTR_CFG_X32_MASK 0x3
213
214 #define DDR_MAX_ROW_SHIFT 18
215 #define DDR_MAX_COL_SHIFT 14
216 #define DDR_MAX_BANK_SHIFT 3
217 #define DDR_MAX_BANKGRP_SHIFT 2
218
219 #define ROW_MAX_VAL_MASK 0xF
220 #define COL_MAX_VAL_MASK 0xF
221 #define BANK_MAX_VAL_MASK 0x1F
222 #define BANKGRP_MAX_VAL_MASK 0x1F
223 #define RANK_MAX_VAL_MASK 0x1F
224
225 #define ROW_B0_BASE 6
226 #define ROW_B1_BASE 7
227 #define ROW_B2_BASE 8
228 #define ROW_B3_BASE 9
229 #define ROW_B4_BASE 10
230 #define ROW_B5_BASE 11
231 #define ROW_B6_BASE 12
232 #define ROW_B7_BASE 13
233 #define ROW_B8_BASE 14
234 #define ROW_B9_BASE 15
235 #define ROW_B10_BASE 16
236 #define ROW_B11_BASE 17
237 #define ROW_B12_BASE 18
238 #define ROW_B13_BASE 19
239 #define ROW_B14_BASE 20
240 #define ROW_B15_BASE 21
241 #define ROW_B16_BASE 22
242 #define ROW_B17_BASE 23
243
244 #define COL_B2_BASE 2
245 #define COL_B3_BASE 3
246 #define COL_B4_BASE 4
247 #define COL_B5_BASE 5
248 #define COL_B6_BASE 6
249 #define COL_B7_BASE 7
250 #define COL_B8_BASE 8
251 #define COL_B9_BASE 9
252 #define COL_B10_BASE 10
253 #define COL_B11_BASE 11
254 #define COL_B12_BASE 12
255 #define COL_B13_BASE 13
256
257 #define BANK_B0_BASE 2
258 #define BANK_B1_BASE 3
259 #define BANK_B2_BASE 4
260
261 #define BANKGRP_B0_BASE 2
262 #define BANKGRP_B1_BASE 3
263
264 #define RANK_B0_BASE 6
265
/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position.
 * @data:	Data causing the error.
 * @bankgrpnr:	Bank group number.
 * @blknr:	Block number.
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};

/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count.
 * @ue_cnt:	Uncorrectable error count.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};

/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data.
 * @ce_cnt:		Correctable Error count.
 * @ue_cnt:		Uncorrectable Error count.
 * @poison_addr:	Data poison address.
 * @row_shift:		Bit shifts for row bit.
 * @col_shift:		Bit shifts for column bit.
 * @bank_shift:		Bit shifts for bank bit.
 * @bankgrp_shift:	Bit shifts for bank group bit.
 * @rank_shift:		Bit shifts for rank bit.
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	/* Running totals accumulated across all IRQ/poll invocations */
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	/* Error-injection state; array sizes match the DDR_MAX_*_SHIFT
	 * iteration bounds used in ddr_poison_setup(). */
	ulong poison_addr;
	u32 row_shift[18];	/* DDR_MAX_ROW_SHIFT entries */
	u32 col_shift[14];	/* DDR_MAX_COL_SHIFT entries */
	u32 bank_shift[3];	/* DDR_MAX_BANK_SHIFT entries */
	u32 bankgrp_shift[2];	/* DDR_MAX_BANKGRP_SHIFT entries */
	u32 rank_shift[1];
#endif
};

/**
 * struct synps_platform_data - synps platform data structure.
 * @get_error_info:	Get EDAC error info.
 * @get_mtype:		Get mtype.
 * @get_dtype:		Get dtype.
 * @get_ecc_state:	Get ECC state.
 * @quirks:		To differentiate IPs; bitmask of DDR_ECC_* flags.
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
	int quirks;
};
347
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the Zynq DDRC status/log registers into @priv->stat and clear the
 * logged errors in the hardware.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* A zero status register means nothing was logged */
	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only if the count and the valid bit agree */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	/* Remember to clear the CE log on the way out */
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/* Pulse the clear bits: set the accumulated flags, then write zero.
	 * NOTE(review): the zero write presumably completes the clear
	 * sequence per the DDRC programming model — confirm against the
	 * Zynq TRM.
	 */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
402
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the ZynqMP DDRC error-count, address and syndrome registers into
 * @priv->stat, then clear the hardware error flags and counters.
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* Snapshot both error counters from the single count register */
	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	/* A zero status means no decodable CE info; report "no error"
	 * without touching the clear register. */
	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval &	ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	/* First syndrome word doubles as the logged "data" value */
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
	/* Clear both error flags and both counters unconditionally.  On
	 * v3.x (DDR_ECC_INTR_SELF_CLEAR) parts ECC_CLR also gates the CE/UE
	 * interrupts — see enable_intr() — so intr_handler() re-arms them
	 * after this returns. */
	clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
	clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);
	writel(0x0, base + ECC_CLR_OFST);

	return 0;
}
463
464 /**
465 * handle_error - Handle Correctable and Uncorrectable errors.
466 * @mci: EDAC memory controller instance.
467 * @p: Synopsys ECC status structure.
468 *
469 * Handles ECC correctable and uncorrectable errors.
470 */
handle_error(struct mem_ctl_info * mci,struct synps_ecc_status * p)471 static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
472 {
473 struct synps_edac_priv *priv = mci->pvt_info;
474 struct ecc_error_info *pinf;
475
476 if (p->ce_cnt) {
477 pinf = &p->ceinfo;
478 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
479 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
480 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
481 "CE", pinf->row, pinf->bank,
482 pinf->bankgrpnr, pinf->blknr,
483 pinf->bitpos, pinf->data);
484 } else {
485 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
486 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
487 "CE", pinf->row, pinf->bank, pinf->col,
488 pinf->bitpos, pinf->data);
489 }
490
491 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
492 p->ce_cnt, 0, 0, 0, 0, 0, -1,
493 priv->message, "");
494 }
495
496 if (p->ue_cnt) {
497 pinf = &p->ueinfo;
498 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
499 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
500 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
501 "UE", pinf->row, pinf->bank,
502 pinf->bankgrpnr, pinf->blknr);
503 } else {
504 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
505 "DDR ECC error type :%s Row %d Bank %d Col %d ",
506 "UE", pinf->row, pinf->bank, pinf->col);
507 }
508
509 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
510 p->ue_cnt, 0, 0, 0, 0, 0, -1,
511 priv->message, "");
512 }
513
514 memset(p, 0, sizeof(*p));
515 }
516
enable_intr(struct synps_edac_priv * priv)517 static void enable_intr(struct synps_edac_priv *priv)
518 {
519 /* Enable UE/CE Interrupts */
520 if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
521 writel(DDR_UE_MASK | DDR_CE_MASK,
522 priv->baseaddr + ECC_CLR_OFST);
523 else
524 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
525 priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
526
527 }
528
disable_intr(struct synps_edac_priv * priv)529 static void disable_intr(struct synps_edac_priv *priv)
530 {
531 /* Disable UE/CE Interrupts */
532 if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
533 writel(0x0, priv->baseaddr + ECC_CLR_OFST);
534 else
535 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
536 priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
537 }
538
/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:	IRQ number.
 * @dev_id:	Device ID (the mem_ctl_info registered with the IRQ).
 *
 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		/* Check the QOS status to reject interrupts that are not
		 * ours; regval is only initialized (and later used) on
		 * this non-self-clear path. */
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	/* Non-zero status means the IP had no logged error to decode */
	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	/* Accumulate running totals, then report this batch */
	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
	/* v3.0 of the controller does not have this register */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		/* Acknowledge the QOS interrupt bits we observed above */
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
	else
		/* get_error_info() cleared ECC_CLR, which also gates the
		 * CE/UE interrupts on v3.x — re-arm them */
		enable_intr(priv);

	return IRQ_HANDLED;
}
585
586 /**
587 * check_errors - Check controller for ECC errors.
588 * @mci: EDAC memory controller instance.
589 *
590 * Check and post ECC errors. Called by the polling thread.
591 */
check_errors(struct mem_ctl_info * mci)592 static void check_errors(struct mem_ctl_info *mci)
593 {
594 const struct synps_platform_data *p_data;
595 struct synps_edac_priv *priv;
596 int status;
597
598 priv = mci->pvt_info;
599 p_data = priv->p_data;
600
601 status = p_data->get_error_info(priv);
602 if (status)
603 return;
604
605 priv->ce_cnt += priv->stat.ce_cnt;
606 priv->ue_cnt += priv->stat.ue_cnt;
607 handle_error(mci, &priv->stat);
608
609 edac_dbg(3, "Total error count CE %d UE %d\n",
610 priv->ce_cnt, priv->ue_cnt);
611 }
612
613 /**
614 * zynq_get_dtype - Return the controller memory width.
615 * @base: DDR memory controller base address.
616 *
617 * Get the EDAC device type width appropriate for the current controller
618 * configuration.
619 *
620 * Return: a device type width enumeration.
621 */
zynq_get_dtype(const void __iomem * base)622 static enum dev_type zynq_get_dtype(const void __iomem *base)
623 {
624 enum dev_type dt;
625 u32 width;
626
627 width = readl(base + CTRL_OFST);
628 width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
629
630 switch (width) {
631 case DDRCTL_WDTH_16:
632 dt = DEV_X2;
633 break;
634 case DDRCTL_WDTH_32:
635 dt = DEV_X4;
636 break;
637 default:
638 dt = DEV_UNKNOWN;
639 }
640
641 return dt;
642 }
643
644 /**
645 * zynqmp_get_dtype - Return the controller memory width.
646 * @base: DDR memory controller base address.
647 *
648 * Get the EDAC device type width appropriate for the current controller
649 * configuration.
650 *
651 * Return: a device type width enumeration.
652 */
zynqmp_get_dtype(const void __iomem * base)653 static enum dev_type zynqmp_get_dtype(const void __iomem *base)
654 {
655 enum dev_type dt;
656 u32 width;
657
658 width = readl(base + CTRL_OFST);
659 width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
660 switch (width) {
661 case DDRCTL_EWDTH_16:
662 dt = DEV_X2;
663 break;
664 case DDRCTL_EWDTH_32:
665 dt = DEV_X4;
666 break;
667 case DDRCTL_EWDTH_64:
668 dt = DEV_X8;
669 break;
670 default:
671 dt = DEV_UNKNOWN;
672 }
673
674 return dt;
675 }
676
677 /**
678 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
679 * @base: DDR memory controller base address.
680 *
681 * Get the ECC enable/disable status of the controller.
682 *
683 * Return: true if enabled, otherwise false.
684 */
zynq_get_ecc_state(void __iomem * base)685 static bool zynq_get_ecc_state(void __iomem *base)
686 {
687 enum dev_type dt;
688 u32 ecctype;
689
690 dt = zynq_get_dtype(base);
691 if (dt == DEV_UNKNOWN)
692 return false;
693
694 ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
695 if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
696 return true;
697
698 return false;
699 }
700
701 /**
702 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
703 * @base: DDR memory controller base address.
704 *
705 * Get the ECC enable/disable status for the controller.
706 *
707 * Return: a ECC status boolean i.e true/false - enabled/disabled.
708 */
zynqmp_get_ecc_state(void __iomem * base)709 static bool zynqmp_get_ecc_state(void __iomem *base)
710 {
711 enum dev_type dt;
712 u32 ecctype;
713
714 dt = zynqmp_get_dtype(base);
715 if (dt == DEV_UNKNOWN)
716 return false;
717
718 ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
719 if ((ecctype == SCRUB_MODE_SECDED) &&
720 ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
721 return true;
722
723 return false;
724 }
725
/**
 * get_memsize - Read the size of the attached memory device.
 *
 * Derives the size from the total system RAM reported by the mm layer
 * rather than from controller registers.
 *
 * NOTE(review): the u32 return truncates on systems with >= 4 GiB of
 * RAM (totalram * mem_unit exceeds 32 bits) — confirm against the
 * supported memory configurations before relying on the value.
 *
 * Return: the memory size in bytes.
 */
static u32 get_memsize(void)
{
	struct sysinfo inf;

	si_meminfo(&inf);

	return inf.totalram * inf.mem_unit;
}
739
740 /**
741 * zynq_get_mtype - Return the controller memory type.
742 * @base: Synopsys ECC status structure.
743 *
744 * Get the EDAC memory type appropriate for the current controller
745 * configuration.
746 *
747 * Return: a memory type enumeration.
748 */
zynq_get_mtype(const void __iomem * base)749 static enum mem_type zynq_get_mtype(const void __iomem *base)
750 {
751 enum mem_type mt;
752 u32 memtype;
753
754 memtype = readl(base + T_ZQ_OFST);
755
756 if (memtype & T_ZQ_DDRMODE_MASK)
757 mt = MEM_DDR3;
758 else
759 mt = MEM_DDR2;
760
761 return mt;
762 }
763
764 /**
765 * zynqmp_get_mtype - Returns controller memory type.
766 * @base: Synopsys ECC status structure.
767 *
768 * Get the EDAC memory type appropriate for the current controller
769 * configuration.
770 *
771 * Return: a memory type enumeration.
772 */
zynqmp_get_mtype(const void __iomem * base)773 static enum mem_type zynqmp_get_mtype(const void __iomem *base)
774 {
775 enum mem_type mt;
776 u32 memtype;
777
778 memtype = readl(base + CTRL_OFST);
779
780 if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
781 mt = MEM_DDR3;
782 else if (memtype & MEM_TYPE_DDR2)
783 mt = MEM_RDDR2;
784 else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
785 mt = MEM_DDR4;
786 else
787 mt = MEM_EMPTY;
788
789 return mt;
790 }
791
792 /**
793 * init_csrows - Initialize the csrow data.
794 * @mci: EDAC memory controller instance.
795 *
796 * Initialize the chip select rows associated with the EDAC memory
797 * controller instance.
798 */
init_csrows(struct mem_ctl_info * mci)799 static void init_csrows(struct mem_ctl_info *mci)
800 {
801 struct synps_edac_priv *priv = mci->pvt_info;
802 const struct synps_platform_data *p_data;
803 struct csrow_info *csi;
804 struct dimm_info *dimm;
805 u32 size, row;
806 int j;
807
808 p_data = priv->p_data;
809
810 for (row = 0; row < mci->nr_csrows; row++) {
811 csi = mci->csrows[row];
812 size = get_memsize();
813
814 for (j = 0; j < csi->nr_channels; j++) {
815 dimm = csi->channels[j]->dimm;
816 dimm->edac_mode = EDAC_SECDED;
817 dimm->mtype = p_data->get_mtype(priv->baseaddr);
818 dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
819 dimm->grain = SYNPS_EDAC_ERR_GRAIN;
820 dimm->dtype = p_data->get_dtype(priv->baseaddr);
821 }
822 }
823 }
824
825 /**
826 * mc_init - Initialize one driver instance.
827 * @mci: EDAC memory controller instance.
828 * @pdev: platform device.
829 *
830 * Perform initialization of the EDAC memory controller instance and
831 * related driver-private data associated with the memory controller the
832 * instance is bound to.
833 */
mc_init(struct mem_ctl_info * mci,struct platform_device * pdev)834 static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
835 {
836 struct synps_edac_priv *priv;
837
838 mci->pdev = &pdev->dev;
839 priv = mci->pvt_info;
840 platform_set_drvdata(pdev, mci);
841
842 /* Initialize controller capabilities and configuration */
843 mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
844 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
845 mci->scrub_cap = SCRUB_HW_SRC;
846 mci->scrub_mode = SCRUB_NONE;
847
848 mci->edac_cap = EDAC_FLAG_SECDED;
849 mci->ctl_name = "synps_ddr_controller";
850 mci->dev_name = SYNPS_EDAC_MOD_STRING;
851 mci->mod_name = SYNPS_EDAC_MOD_VER;
852
853 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
854 edac_op_state = EDAC_OPSTATE_INT;
855 } else {
856 edac_op_state = EDAC_OPSTATE_POLL;
857 mci->edac_check = check_errors;
858 }
859
860 mci->ctl_page_to_phys = NULL;
861
862 init_csrows(mci);
863 }
864
setup_irq(struct mem_ctl_info * mci,struct platform_device * pdev)865 static int setup_irq(struct mem_ctl_info *mci,
866 struct platform_device *pdev)
867 {
868 struct synps_edac_priv *priv = mci->pvt_info;
869 int ret, irq;
870
871 irq = platform_get_irq(pdev, 0);
872 if (irq < 0) {
873 edac_printk(KERN_ERR, EDAC_MC,
874 "No IRQ %d in DT\n", irq);
875 return irq;
876 }
877
878 ret = devm_request_irq(&pdev->dev, irq, intr_handler,
879 0, dev_name(&pdev->dev), mci);
880 if (ret < 0) {
881 edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
882 return ret;
883 }
884
885 enable_intr(priv);
886
887 return 0;
888 }
889
/* Zynq A05 DDRC: no quirks — errors are collected by polling */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};

/* ZynqMP DDRC 2.40a: interrupt driven; data poison only for EDAC debug */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};

/* Generic Synopsys DDRC 3.80a: like ZynqMP but with self-clearing
 * CE/UE interrupt status bits */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};


/* Device-tree match table binding each compatible to its platform data */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);
942
943 #ifdef CONFIG_EDAC_DEBUG
944 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
945
/**
 * ddr_poison_setup -	Update poison registers.
 * @priv:		DDR memory controller private instance data.
 *
 * Translate the user-supplied poison system address into DDR geometry
 * (rank/bank group/bank/row/column) using the address-map shift tables
 * in @priv, and program the result into the ECC poison registers.
 * Return: none.
 */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	int index;
	ulong hif_addr = 0;

	/* System address -> HIF address.
	 * NOTE(review): the >> 3 presumably accounts for the controller
	 * data-path width — confirm for 16/32-bit bus configurations. */
	hif_addr = priv->poison_addr >> 3;

	/* Gather each row bit from the HIF address; a zero shift entry
	 * marks the end of the mapped row bits. */
	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* Same for column bits, except the first three are taken even when
	 * their shift value is 0. */
	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	/* A single optional rank bit */
	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	/* POISON0 = rank + column, POISON1 = bank group + bank + row */
	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
1005
inject_data_error_show(struct device * dev,struct device_attribute * mattr,char * data)1006 static ssize_t inject_data_error_show(struct device *dev,
1007 struct device_attribute *mattr,
1008 char *data)
1009 {
1010 struct mem_ctl_info *mci = to_mci(dev);
1011 struct synps_edac_priv *priv = mci->pvt_info;
1012
1013 return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
1014 "Error injection Address: 0x%lx\n\r",
1015 readl(priv->baseaddr + ECC_POISON0_OFST),
1016 readl(priv->baseaddr + ECC_POISON1_OFST),
1017 priv->poison_addr);
1018 }
1019
inject_data_error_store(struct device * dev,struct device_attribute * mattr,const char * data,size_t count)1020 static ssize_t inject_data_error_store(struct device *dev,
1021 struct device_attribute *mattr,
1022 const char *data, size_t count)
1023 {
1024 struct mem_ctl_info *mci = to_mci(dev);
1025 struct synps_edac_priv *priv = mci->pvt_info;
1026
1027 if (kstrtoul(data, 0, &priv->poison_addr))
1028 return -EINVAL;
1029
1030 ddr_poison_setup(priv);
1031
1032 return count;
1033 }
1034
inject_data_poison_show(struct device * dev,struct device_attribute * mattr,char * data)1035 static ssize_t inject_data_poison_show(struct device *dev,
1036 struct device_attribute *mattr,
1037 char *data)
1038 {
1039 struct mem_ctl_info *mci = to_mci(dev);
1040 struct synps_edac_priv *priv = mci->pvt_info;
1041
1042 return sprintf(data, "Data Poisoning: %s\n\r",
1043 (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
1044 ? ("Correctable Error") : ("UnCorrectable Error"));
1045 }
1046
inject_data_poison_store(struct device * dev,struct device_attribute * mattr,const char * data,size_t count)1047 static ssize_t inject_data_poison_store(struct device *dev,
1048 struct device_attribute *mattr,
1049 const char *data, size_t count)
1050 {
1051 struct mem_ctl_info *mci = to_mci(dev);
1052 struct synps_edac_priv *priv = mci->pvt_info;
1053
1054 writel(0, priv->baseaddr + DDRC_SWCTL);
1055 if (strncmp(data, "CE", 2) == 0)
1056 writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1057 else
1058 writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1059 writel(1, priv->baseaddr + DDRC_SWCTL);
1060
1061 return count;
1062 }
1063
1064 static DEVICE_ATTR_RW(inject_data_error);
1065 static DEVICE_ATTR_RW(inject_data_poison);
1066
edac_create_sysfs_attributes(struct mem_ctl_info * mci)1067 static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1068 {
1069 int rc;
1070
1071 rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1072 if (rc < 0)
1073 return rc;
1074 rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
1075 if (rc < 0)
1076 return rc;
1077 return 0;
1078 }
1079
edac_remove_sysfs_attributes(struct mem_ctl_info * mci)1080 static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
1081 {
1082 device_remove_file(&mci->dev, &dev_attr_inject_data_error);
1083 device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
1084 }
1085
/**
 * setup_row_address_map - Derive row-bit shift positions from ADDRMAP.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Cached copy of the twelve ADDRMAP registers.
 *
 * Each masked ADDRMAP field is an offset added to the row bit's internal
 * base position (ROW_Bn_BASE). A field equal to ROW_MAX_VAL_MASK marks
 * the bit as unused; for row bits 11-17 the shift is then recorded as 0.
 */
static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 addrmap_row_b2_10;
	int index;

	/* Row bits 0 and 1 always come from ADDRMAP5 fields. */
	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
	priv->row_shift[1] = ((addrmap[5] >> 8) &
			ROW_MAX_VAL_MASK) + ROW_B1_BASE;

	/*
	 * Row bits 2-10: ADDRMAP5[23:16] either encodes one common offset
	 * for all nine bits, or (when it reads as the "unused" marker)
	 * per-bit fields in ADDRMAP9-ADDRMAP11 are used instead.
	 */
	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
		for (index = 2; index < 11; index++)
			priv->row_shift[index] = addrmap_row_b2_10 +
				index + ROW_B0_BASE;

	} else {
		priv->row_shift[2] = (addrmap[9] &
				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
		priv->row_shift[3] = ((addrmap[9] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
		priv->row_shift[4] = ((addrmap[9] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
		priv->row_shift[5] = ((addrmap[9] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
		priv->row_shift[6] = (addrmap[10] &
				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
		priv->row_shift[7] = ((addrmap[10] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
		priv->row_shift[8] = ((addrmap[10] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
		priv->row_shift[9] = ((addrmap[10] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
		priv->row_shift[10] = (addrmap[11] &
				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
	}

	/* Row bits 11-17: individually optional (0 when field is all-ones). */
	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}
1144
/**
 * setup_column_address_map - Derive column-bit shift positions from ADDRMAP.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Cached copy of the twelve ADDRMAP registers.
 *
 * Column bits 0-9 are decoded uniformly from ADDRMAP2/ADDRMAP3; a field
 * equal to COL_MAX_VAL_MASK marks the bit as unused (shift 0). The
 * highest column bits depend on the controller bus width and on whether
 * the memory is LPDDR3 (read from the CTRL register).
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	/* CTRL register carries both the memory type bits and bus width. */
	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 are always the two lowest address bits. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);
	/*
	 * Upper column bits: the ADDRMAP field used and the col_shift index
	 * it lands in vary with bus width and memory type. NOTE(review):
	 * the non-LPDDR3 branches write col_shift[11]/[13] (skipping 10/12)
	 * while LPDDR3 uses consecutive indices — this asymmetry appears
	 * intentional; confirm against the Synopsys DDRC address-map spec.
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/*
	 * For reduced bus widths, slide the decoded column shifts up by
	 * 'width' positions, zeroing the vacated low entries.
	 * NOTE(review): presumably compensates for the HIF address shift at
	 * half/quarter bus width — confirm against the controller spec.
	 */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}

}
1250
/**
 * setup_bank_address_map - Derive bank-bit shift positions from ADDRMAP.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Cached copy of the twelve ADDRMAP registers.
 *
 * Bank bits 0 and 1 are always mapped; bank bit 2 is optional and its
 * shift is recorded as 0 when the ADDRMAP1 field reads as all-ones.
 */
static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 b0_field, b1_field, b2_field;

	b0_field = addrmap[1] & BANK_MAX_VAL_MASK;
	b1_field = (addrmap[1] >> 8) & BANK_MAX_VAL_MASK;
	b2_field = (addrmap[1] >> 16) & BANK_MAX_VAL_MASK;

	priv->bank_shift[0] = b0_field + BANK_B0_BASE;
	priv->bank_shift[1] = b1_field + BANK_B1_BASE;
	priv->bank_shift[2] = (b2_field == BANK_MAX_VAL_MASK) ?
				0 : b2_field + BANK_B2_BASE;
}
1262
/**
 * setup_bg_address_map - Derive bank-group shift positions from ADDRMAP.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Cached copy of the twelve ADDRMAP registers.
 *
 * Bank-group bit 0 is always mapped; bank-group bit 1 is optional and
 * its shift is recorded as 0 when the ADDRMAP8 field reads as all-ones.
 */
static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 bg0_field, bg1_field;

	bg0_field = addrmap[8] & BANKGRP_MAX_VAL_MASK;
	bg1_field = (addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK;

	priv->bankgrp_shift[0] = bg0_field + BANKGRP_B0_BASE;
	priv->bankgrp_shift[1] = (bg1_field == BANKGRP_MAX_VAL_MASK) ?
				0 : bg1_field + BANKGRP_B1_BASE;
}
1272
/**
 * setup_rank_address_map - Derive the rank shift position from ADDRMAP.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Cached copy of the twelve ADDRMAP registers.
 *
 * The rank bit is optional: an all-ones ADDRMAP0 field means no rank
 * bit is decoded and the shift is recorded as 0.
 */
static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 rank_field = addrmap[0] & RANK_MAX_VAL_MASK;

	priv->rank_shift[0] = (rank_field == RANK_MAX_VAL_MASK) ?
				0 : rank_field + RANK_B0_BASE;
}
1279
1280 /**
1281 * setup_address_map - Set Address Map by querying ADDRMAP registers.
1282 * @priv: DDR memory controller private instance data.
1283 *
1284 * Set Address Map by querying ADDRMAP registers.
1285 *
1286 * Return: none.
1287 */
setup_address_map(struct synps_edac_priv * priv)1288 static void setup_address_map(struct synps_edac_priv *priv)
1289 {
1290 u32 addrmap[12];
1291 int index;
1292
1293 for (index = 0; index < 12; index++) {
1294 u32 addrmap_offset;
1295
1296 addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1297 addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1298 }
1299
1300 setup_row_address_map(priv, addrmap);
1301
1302 setup_column_address_map(priv, addrmap);
1303
1304 setup_bank_address_map(priv, addrmap);
1305
1306 setup_bg_address_map(priv, addrmap);
1307
1308 setup_rank_address_map(priv, addrmap);
1309 }
1310 #endif /* CONFIG_EDAC_DEBUG */
1311
1312 /**
1313 * mc_probe - Check controller and bind driver.
1314 * @pdev: platform device.
1315 *
1316 * Probe a specific controller instance for binding with the driver.
1317 *
1318 * Return: 0 if the controller instance was successfully bound to the
1319 * driver; otherwise, < 0 on error.
1320 */
mc_probe(struct platform_device * pdev)1321 static int mc_probe(struct platform_device *pdev)
1322 {
1323 const struct synps_platform_data *p_data;
1324 struct edac_mc_layer layers[2];
1325 struct synps_edac_priv *priv;
1326 struct mem_ctl_info *mci;
1327 void __iomem *baseaddr;
1328 struct resource *res;
1329 int rc;
1330
1331 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1332 baseaddr = devm_ioremap_resource(&pdev->dev, res);
1333 if (IS_ERR(baseaddr))
1334 return PTR_ERR(baseaddr);
1335
1336 p_data = of_device_get_match_data(&pdev->dev);
1337 if (!p_data)
1338 return -ENODEV;
1339
1340 if (!p_data->get_ecc_state(baseaddr)) {
1341 edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
1342 return -ENXIO;
1343 }
1344
1345 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1346 layers[0].size = SYNPS_EDAC_NR_CSROWS;
1347 layers[0].is_virt_csrow = true;
1348 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1349 layers[1].size = SYNPS_EDAC_NR_CHANS;
1350 layers[1].is_virt_csrow = false;
1351
1352 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1353 sizeof(struct synps_edac_priv));
1354 if (!mci) {
1355 edac_printk(KERN_ERR, EDAC_MC,
1356 "Failed memory allocation for mc instance\n");
1357 return -ENOMEM;
1358 }
1359
1360 priv = mci->pvt_info;
1361 priv->baseaddr = baseaddr;
1362 priv->p_data = p_data;
1363
1364 mc_init(mci, pdev);
1365
1366 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1367 rc = setup_irq(mci, pdev);
1368 if (rc)
1369 goto free_edac_mc;
1370 }
1371
1372 rc = edac_mc_add_mc(mci);
1373 if (rc) {
1374 edac_printk(KERN_ERR, EDAC_MC,
1375 "Failed to register with EDAC core\n");
1376 goto free_edac_mc;
1377 }
1378
1379 #ifdef CONFIG_EDAC_DEBUG
1380 if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
1381 rc = edac_create_sysfs_attributes(mci);
1382 if (rc) {
1383 edac_printk(KERN_ERR, EDAC_MC,
1384 "Failed to create sysfs entries\n");
1385 goto free_edac_mc;
1386 }
1387 }
1388
1389 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1390 setup_address_map(priv);
1391 #endif
1392
1393 /*
1394 * Start capturing the correctable and uncorrectable errors. A write of
1395 * 0 starts the counters.
1396 */
1397 if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1398 writel(0x0, baseaddr + ECC_CTRL_OFST);
1399
1400 return rc;
1401
1402 free_edac_mc:
1403 edac_mc_free(mci);
1404
1405 return rc;
1406 }
1407
1408 /**
1409 * mc_remove - Unbind driver from controller.
1410 * @pdev: Platform device.
1411 *
1412 * Return: Unconditionally 0
1413 */
mc_remove(struct platform_device * pdev)1414 static int mc_remove(struct platform_device *pdev)
1415 {
1416 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1417 struct synps_edac_priv *priv = mci->pvt_info;
1418
1419 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1420 disable_intr(priv);
1421
1422 #ifdef CONFIG_EDAC_DEBUG
1423 if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
1424 edac_remove_sysfs_attributes(mci);
1425 #endif
1426
1427 edac_mc_del_mc(&pdev->dev);
1428 edac_mc_free(mci);
1429
1430 return 0;
1431 }
1432
/* Platform driver glue; devices are matched via the synps_edac_match table. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove = mc_remove,
};
1441
/* Standard module registration boilerplate. */
module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");
1447