/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */
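
/*
 * Sketch of the full decode pipeline as it is driven from a machine
 * check (see get_memory_error_data() below); a minimal illustration,
 * not additional driver logic:
 *
 *	sys2pmi(sys_addr, &pmiidx, &pmiaddr, msg);	 // stage 1: slice/channel
 *	pmiaddr >>= ops->pmiaddr_shift;			 // platform granularity
 *	pmiidx >>= ops->pmiidx_shift;			 // PMI chan -> DIMM chan
 *	ops->pmi2mem(mci, pmiaddr, pmiidx, &daddr, msg); // stage 2: DIMM, rank,
 *							 // bank, row, column
 */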

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define EDAC_MOD_STR		"pnd2_edac"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64	base;
	u64	limit;
	u8	enabled;
} mot, as0, as1, as2;

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
				   struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT	24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
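
/*
 * Example (illustrative values): GET_BITFIELD(0xabcd, 4, 7) extracts
 * bits [7:4] of 0xabcd, i.e. 0xc. U64_LSHIFT(0x1f, 20) widens to u64
 * before shifting, giving 0x1f00000 with no risk of 32-bit overflow.
 */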

/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus	*p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1

#define P2SB_BUSY	1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
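
/*
 * A mailbox read transaction (see _apl_rd_reg() below) goes: program
 * the target port/offset into ADDR, clear DATA/EADD/ROUT, kick off the
 * operation by writing (opcode << 8) | P2SB_BUSY to STAT, poll until
 * BUSY clears, then fetch the result from DATA.
 */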

static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}

static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;
	u8 hidden;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		/* Unhide the P2SB device, if it's hidden */
		pci_read_config_byte(pdev, 0xe1, &hidden);
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, 0);

		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		lo &= 0xfffffff0;

		/* Hide the P2SB device, if it was hidden before */
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, hidden);

		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}

static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,					\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)			\
	ops->rd_reg(regname ## _port,		\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)
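
/*
 * Example expansion (the b_cr_* register layouts and their _port,
 * _offset and _r_opcode constants come from pnd2_edac.h):
 * RD_REG(&tolud, b_cr_tolud_pci) becomes
 *
 *	ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *		    b_cr_tolud_pci_r_opcode, &tolud,
 *		    sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
 */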

static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
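
/*
 * Example (illustrative values): base = 0x1000000 with mask =
 * GENMASK_ULL(39, 24) describes the 16MB region [0x1000000, 0x1ffffff]:
 * limit = (base | ~mask) & GENMASK_ULL(39, 0) = 0x1ffffff.
 */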

static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}
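
/*
 * In both masks bit N stands for PMI channel N: bits 0/1 are slice 0
 * channels 0/1 and bits 2/3 are slice 1 channels 0/1. E.g. both slices
 * enabled with both channels gives a symmetric mask of 0xf; disabling
 * channel 1 (or enabling dual data mode) trims that down to 0x5.
 */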

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on a board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
		RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
				 !chash.slice_0_mem_disabled &&
				 (chash.sym_slice0_channel_enabled != 0) &&
				 (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
				 !chash.enable_pmi_dual_data_mode &&
				 ((chash.sym_slice0_channel_enabled == 3) ||
				 (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}

/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64	mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
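
/*
 * Example (illustrative values): remove_addr_bit(&addr, 12) on
 * addr = 0x3f25 keeps bits [11:0] (0xf25), drops bit 12 and shifts
 * bits [63:13] down one place, leaving 0x1f25.
 */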

/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}
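
/*
 * Example (illustrative values): hash_by_mask(0xdeadbeef, 0xff) folds
 * 0xef (seven set bits) down to its parity and returns 1.
 */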

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)			/* rank */

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1
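
/*
 * Each entry of dimm_geometry.bits[] below encodes which DRAM address
 * bit a given PMI address bit feeds: e.g. C(9) = 0x19 means "column
 * bit 9", B(2) = 0x22 "bank bit 2", R(14) = 0x4e "row bit 14", RS the
 * rank select bit; 0 marks unused PMI bits.
 */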

static struct dimm_geometry {
	u8	addrdec;
	u8	dden;
	u8	dwid;
	u8	rowbits, colbits;
	u16	bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};

static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
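
/*
 * Example (illustrative values): with shft = 0 (AMAP_1KB), bank_hash()
 * for idx 0 XORs pmiaddr bits 12 and 9 into bank bit 0; rank_hash()
 * is simply the parity of pmiaddr bits 16 and 10.
 */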

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int	bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
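
/*
 * Example (illustrative values): dnv_get_bit(0x40, 6, 2) plucks bit 6
 * of 0x40 (== 1) and returns it shifted to bit position 2, i.e. 4.
 */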

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int	i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int	i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
								 struct dram_addr *daddr, char *msg)
{
	u64	pmiaddr;
	u32	pmiidx;
	int	ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
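	/*
	 * Example (illustrative value): errcode 0x0090 matches the mask:
	 * bit 7 set, mmm = 001 (memory read error), cccc = 0000 (channel 0).
	 */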
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt	*pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64	capacity;
	int	i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
				   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64	capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call	= pnd2_mce_check_error,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};
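
/*
 * Typical usage from userspace (a sketch, assuming debugfs is mounted
 * at /sys/kernel/debug and the EDAC debugfs root is named "edac"):
 *
 *	echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *	cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */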

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
							 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void)	{}
static void teardown_pnd2_debug(void)	{}
#endif /* CONFIG_EDAC_DEBUG */


static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
		.name			= "pnd2/apl",
		.type			= APL,
		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
		.pmiidx_shift		= 0,
		.channels		= APL_NUM_CHANNELS,
		.dimms_per_channel	= 1,
		.rd_reg			= apl_rd_reg,
		.get_registers		= apl_get_registers,
		.check_ecc		= apl_check_ecc_active,
		.mk_region		= apl_mk_region,
		.get_dimm_config	= apl_get_dimm_config,
		.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
		.name			= "pnd2/dnv",
		.type			= DNV,
		.pmiaddr_shift		= 0,
		.pmiidx_shift		= 1,
		.channels		= DNV_NUM_CHANNELS,
		.dimms_per_channel	= 2,
		.rd_reg			= dnv_rd_reg,
		.get_registers		= dnv_get_registers,
		.check_ecc		= dnv_check_ecc_active,
		.mk_region		= dnv_mk_region,
		.get_dimm_config	= dnv_get_dimm_config,
		.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");