1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
4 
5 static struct edac_pci_ctl_info *pci_ctl;
6 
7 static int report_gart_errors;
8 module_param(report_gart_errors, int, 0644);
9 
10 /*
11  * Set by command line parameter. If BIOS has enabled the ECC, this override is
12  * cleared to prevent re-enabling the hardware by this driver.
13  */
14 static int ecc_enable_override;
15 module_param(ecc_enable_override, int, 0644);
16 
17 static struct msr __percpu *msrs;
18 
19 /* Per-node stuff */
20 static struct ecc_settings **ecc_stngs;
21 
22 /* Number of Unified Memory Controllers */
23 static u8 num_umcs;
24 
25 /*
26  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
27  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
28  * or higher value'.
29  *
30  *FIXME: Produce a better mapping/linearisation.
31  */
32 static const struct scrubrate {
33        u32 scrubval;           /* bit pattern for scrub rate */
34        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
35 } scrubrates[] = {
36 	{ 0x01, 1600000000UL},
37 	{ 0x02, 800000000UL},
38 	{ 0x03, 400000000UL},
39 	{ 0x04, 200000000UL},
40 	{ 0x05, 100000000UL},
41 	{ 0x06, 50000000UL},
42 	{ 0x07, 25000000UL},
43 	{ 0x08, 12284069UL},
44 	{ 0x09, 6274509UL},
45 	{ 0x0A, 3121951UL},
46 	{ 0x0B, 1560975UL},
47 	{ 0x0C, 781440UL},
48 	{ 0x0D, 390720UL},
49 	{ 0x0E, 195300UL},
50 	{ 0x0F, 97650UL},
51 	{ 0x10, 48854UL},
52 	{ 0x11, 24427UL},
53 	{ 0x12, 12213UL},
54 	{ 0x13, 6101UL},
55 	{ 0x14, 3051UL},
56 	{ 0x15, 1523UL},
57 	{ 0x16, 761UL},
58 	{ 0x00, 0UL},        /* scrubbing off */
59 };
60 
61 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
62 			       u32 *val, const char *func)
63 {
64 	int err = 0;
65 
66 	err = pci_read_config_dword(pdev, offset, val);
67 	if (err)
68 		amd64_warn("%s: error reading F%dx%03x.\n",
69 			   func, PCI_FUNC(pdev->devfn), offset);
70 
71 	return err;
72 }
73 
74 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
75 				u32 val, const char *func)
76 {
77 	int err = 0;
78 
79 	err = pci_write_config_dword(pdev, offset, val);
80 	if (err)
81 		amd64_warn("%s: error writing to F%dx%03x.\n",
82 			   func, PCI_FUNC(pdev->devfn), offset);
83 
84 	return err;
85 }
86 
87 /*
88  * Select DCT to which PCI cfg accesses are routed
89  */
90 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
91 {
92 	u32 reg = 0;
93 
94 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
95 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
96 	reg |= dct;
97 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
98 }
99 
100 /*
101  *
102  * Depending on the family, F2 DCT reads need special handling:
103  *
104  * K8: has a single DCT only and no address offsets >= 0x100
105  *
106  * F10h: each DCT has its own set of regs
107  *	DCT0 -> F2x040..
108  *	DCT1 -> F2x140..
109  *
110  * F16h: has only 1 DCT
111  *
112  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
113  */
114 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
115 					 int offset, u32 *val)
116 {
117 	switch (pvt->fam) {
118 	case 0xf:
119 		if (dct || offset >= 0x100)
120 			return -EINVAL;
121 		break;
122 
123 	case 0x10:
124 		if (dct) {
125 			/*
126 			 * Note: If ganging is enabled, barring the regs
127 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
128 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
129 			 */
130 			if (dct_ganging_enabled(pvt))
131 				return 0;
132 
133 			offset += 0x100;
134 		}
135 		break;
136 
137 	case 0x15:
138 		/*
139 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
140 		 * We should select which DCT we access using F1x10C[DctCfgSel]
141 		 */
142 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
143 		f15h_select_dct(pvt, dct);
144 		break;
145 
146 	case 0x16:
147 		if (dct)
148 			return -EINVAL;
149 		break;
150 
151 	default:
152 		break;
153 	}
154 	return amd64_read_pci_cfg(pvt->F2, offset, val);
155 }
156 
157 /*
158  * Memory scrubber control interface. For K8, memory scrubbing is handled by
159  * hardware and can involve L2 cache, dcache as well as the main memory. With
160  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
161  * functionality.
162  *
163  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
164  * (dram) over to cache lines. This is nasty, so we will use bandwidth in
165  * bytes/sec for the setting.
166  *
167  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
168  * other archs, we might not have access to the caches directly.
169  */
170 
171 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
172 {
173 	/*
174 	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
175 	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
176 	 * as 0x0, scrubval 0x6 as 0x1, etc.
177 	 */
178 	if (scrubval >= 0x5 && scrubval <= 0x14) {
179 		scrubval -= 0x5;
180 		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
181 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
182 	} else {
183 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
184 	}
185 }
186 /*
187  * Scan the scrub rate mapping table for a close or matching bandwidth value to
188  * issue. If the requested rate is too big, the highest allowed rate is used.
189  */
190 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
191 {
192 	u32 scrubval;
193 	int i;
194 
195 	/*
196 	 * map the configured rate (new_bw) to a value specific to the AMD64
197 	 * memory controller and apply to register. Search for the first
198 	 * bandwidth entry that does not exceed the requested setting
199 	 * and program that. If at last entry, turn off DRAM scrubbing.
200 	 *
201 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
202 	 * by falling back to the last element in scrubrates[].
203 	 */
204 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
205 		/*
206 		 * skip scrub rates which aren't recommended
207 		 * (see F10 BKDG, F3x58)
208 		 */
209 		if (scrubrates[i].scrubval < min_rate)
210 			continue;
211 
212 		if (scrubrates[i].bandwidth <= new_bw)
213 			break;
214 	}
215 
216 	scrubval = scrubrates[i].scrubval;
217 
218 	if (pvt->fam == 0x17 || pvt->fam == 0x18) {
219 		__f17h_set_scrubval(pvt, scrubval);
220 	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
221 		f15h_select_dct(pvt, 0);
222 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
223 		f15h_select_dct(pvt, 1);
224 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
225 	} else {
226 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
227 	}
228 
229 	if (scrubval)
230 		return scrubrates[i].bandwidth;
231 
232 	return 0;
233 }
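
/*
 * Worked example of the lookup above (request value made up): new_bw =
 * 30000000 bytes/sec with min_rate = 0x5 skips scrubvals 0x01-0x04, then
 * stops at { 0x07, 25000000UL }, the first entry whose bandwidth does not
 * exceed the request. Scrubval 0x07 is written via the family-specific
 * path and 25000000 is returned as the effective rate.
 */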
234 
235 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
236 {
237 	struct amd64_pvt *pvt = mci->pvt_info;
238 	u32 min_scrubrate = 0x5;
239 
240 	if (pvt->fam == 0xf)
241 		min_scrubrate = 0x0;
242 
243 	if (pvt->fam == 0x15) {
244 		/* Erratum #505 */
245 		if (pvt->model < 0x10)
246 			f15h_select_dct(pvt, 0);
247 
248 		if (pvt->model == 0x60)
249 			min_scrubrate = 0x6;
250 	}
251 	return __set_scrub_rate(pvt, bw, min_scrubrate);
252 }
253 
254 static int get_scrub_rate(struct mem_ctl_info *mci)
255 {
256 	struct amd64_pvt *pvt = mci->pvt_info;
257 	int i, retval = -EINVAL;
258 	u32 scrubval = 0;
259 
260 	switch (pvt->fam) {
261 	case 0x15:
262 		/* Erratum #505 */
263 		if (pvt->model < 0x10)
264 			f15h_select_dct(pvt, 0);
265 
266 		if (pvt->model == 0x60)
267 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
268 		break;
269 
270 	case 0x17:
271 	case 0x18:
272 		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
273 		if (scrubval & BIT(0)) {
274 			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
275 			scrubval &= 0xF;
276 			scrubval += 0x5;
277 		} else {
278 			scrubval = 0;
279 		}
280 		break;
281 
282 	default:
283 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
284 		break;
285 	}
286 
287 	scrubval = scrubval & 0x001F;
288 
289 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
290 		if (scrubrates[i].scrubval == scrubval) {
291 			retval = scrubrates[i].bandwidth;
292 			break;
293 		}
294 	}
295 	return retval;
296 }
297 
298 /*
299  * returns true if the SysAddr given by sys_addr matches the
300  * DRAM base/limit associated with node_id
301  */
302 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
303 {
304 	u64 addr;
305 
306 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
307 	 * all ones if the most significant implemented address bit is 1.
308 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
309 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
310 	 * Application Programming.
311 	 */
312 	addr = sys_addr & 0x000000ffffffffffull;
313 
314 	return ((addr >= get_dram_base(pvt, nid)) &&
315 		(addr <= get_dram_limit(pvt, nid)));
316 }
317 
318 /*
319  * Attempt to map a SysAddr to a node. On success, return a pointer to the
320  * mem_ctl_info structure for the node that the SysAddr maps to.
321  *
322  * On failure, return NULL.
323  */
324 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
325 						u64 sys_addr)
326 {
327 	struct amd64_pvt *pvt;
328 	u8 node_id;
329 	u32 intlv_en, bits;
330 
331 	/*
332 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
333 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
334 	 */
335 	pvt = mci->pvt_info;
336 
337 	/*
338 	 * The value of this field should be the same for all DRAM Base
339 	 * registers.  Therefore we arbitrarily choose to read it from the
340 	 * register for node 0.
341 	 */
342 	intlv_en = dram_intlv_en(pvt, 0);
343 
344 	if (intlv_en == 0) {
345 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
346 			if (base_limit_match(pvt, sys_addr, node_id))
347 				goto found;
348 		}
349 		goto err_no_match;
350 	}
351 
352 	if (unlikely((intlv_en != 0x01) &&
353 		     (intlv_en != 0x03) &&
354 		     (intlv_en != 0x07))) {
355 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
356 		return NULL;
357 	}
358 
359 	bits = (((u32) sys_addr) >> 12) & intlv_en;
360 
361 	for (node_id = 0; ; ) {
362 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
363 			break;	/* intlv_sel field matches */
364 
365 		if (++node_id >= DRAM_RANGES)
366 			goto err_no_match;
367 	}
368 
369 	/* sanity test for sys_addr */
370 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
371 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
372 			   "range for node %d with node interleaving enabled.\n",
373 			   __func__, sys_addr, node_id);
374 		return NULL;
375 	}
376 
377 found:
378 	return edac_mc_find((int)node_id);
379 
380 err_no_match:
381 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
382 		 (unsigned long)sys_addr);
383 
384 	return NULL;
385 }
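
/*
 * Illustrative example of the interleave path above (values made up): with
 * intlv_en == 0x3 (four-node interleaving), SysAddr bits [13:12] are
 * compared against each node's DRAM Base[IntlvSel] field. A SysAddr with
 * bits [13:12] == 0b10 is claimed by the node whose IntlvSel (masked by
 * intlv_en) equals 2, subject to the base/limit sanity check.
 */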
386 
387 /*
388  * compute the CS base address of the @csrow on the DRAM controller @dct.
389  * For details see F2x[5C:40] in the processor's BKDG
390  */
391 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
392 				 u64 *base, u64 *mask)
393 {
394 	u64 csbase, csmask, base_bits, mask_bits;
395 	u8 addr_shift;
396 
397 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
398 		csbase		= pvt->csels[dct].csbases[csrow];
399 		csmask		= pvt->csels[dct].csmasks[csrow];
400 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
401 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
402 		addr_shift	= 4;
403 
404 	/*
405 	 * F16h and F15h, models 30h and later need two addr_shift values:
406 	 * 8 for high and 6 for low (cf. F16h BKDG).
407 	 */
408 	} else if (pvt->fam == 0x16 ||
409 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
410 		csbase          = pvt->csels[dct].csbases[csrow];
411 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
412 
413 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
414 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
415 
416 		*mask = ~0ULL;
417 		/* poke holes for the csmask */
418 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
419 			   (GENMASK_ULL(30, 19) << 8));
420 
421 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
422 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
423 
424 		return;
425 	} else {
426 		csbase		= pvt->csels[dct].csbases[csrow];
427 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
428 		addr_shift	= 8;
429 
430 		if (pvt->fam == 0x15)
431 			base_bits = mask_bits =
432 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
433 		else
434 			base_bits = mask_bits =
435 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
436 	}
437 
438 	*base  = (csbase & base_bits) << addr_shift;
439 
440 	*mask  = ~0ULL;
441 	/* poke holes for the csmask */
442 	*mask &= ~(mask_bits << addr_shift);
443 	/* OR them in */
444 	*mask |= (csmask & mask_bits) << addr_shift;
445 }
446 
447 #define for_each_chip_select(i, dct, pvt) \
448 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
449 
450 #define chip_select_base(i, dct, pvt) \
451 	pvt->csels[dct].csbases[i]
452 
453 #define for_each_chip_select_mask(i, dct, pvt) \
454 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
455 
456 #define for_each_umc(i) \
457 	for (i = 0; i < num_umcs; i++)
458 
459 /*
460  * @input_addr is an InputAddr associated with the node given by mci. Return the
461  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
462  */
463 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
464 {
465 	struct amd64_pvt *pvt;
466 	int csrow;
467 	u64 base, mask;
468 
469 	pvt = mci->pvt_info;
470 
471 	for_each_chip_select(csrow, 0, pvt) {
472 		if (!csrow_enabled(csrow, 0, pvt))
473 			continue;
474 
475 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
476 
477 		mask = ~mask;
478 
479 		if ((input_addr & mask) == (base & mask)) {
480 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
481 				 (unsigned long)input_addr, csrow,
482 				 pvt->mc_node_id);
483 
484 			return csrow;
485 		}
486 	}
487 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
488 		 (unsigned long)input_addr, pvt->mc_node_id);
489 
490 	return -1;
491 }
492 
493 /*
494  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
495  * for the node represented by mci. Info is passed back in *hole_base,
496  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
497  * info is invalid. Info may be invalid for either of the following reasons:
498  *
499  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
500  *   Address Register does not exist.
501  *
502  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
503  *   indicating that its contents are not valid.
504  *
505  * The values passed back in *hole_base, *hole_offset, and *hole_size are
506  * complete 32-bit values despite the fact that the bitfields in the DHAR
507  * only represent bits 31-24 of the base and offset values.
508  */
509 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
510 			     u64 *hole_offset, u64 *hole_size)
511 {
512 	struct amd64_pvt *pvt = mci->pvt_info;
513 
514 	/* only revE and later have the DRAM Hole Address Register */
515 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
516 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
517 			 pvt->ext_model, pvt->mc_node_id);
518 		return 1;
519 	}
520 
521 	/* valid for Fam10h and above */
522 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
523 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
524 		return 1;
525 	}
526 
527 	if (!dhar_valid(pvt)) {
528 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
529 			 pvt->mc_node_id);
530 		return 1;
531 	}
532 
533 	/* This node has Memory Hoisting */
534 
535 	/* +------------------+--------------------+--------------------+-----
536 	 * | memory           | DRAM hole          | relocated          |
537 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
538 	 * |                  |                    | DRAM hole          |
539 	 * |                  |                    | [0x100000000,      |
540 	 * |                  |                    |  (0x100000000+     |
541 	 * |                  |                    |   (0xffffffff-x))] |
542 	 * +------------------+--------------------+--------------------+-----
543 	 *
544 	 * Above is a diagram of physical memory showing the DRAM hole and the
545 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
546 	 * starts at address x (the base address) and extends through address
547 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
548 	 * addresses in the hole so that they start at 0x100000000.
549 	 */
550 
551 	*hole_base = dhar_base(pvt);
552 	*hole_size = (1ULL << 32) - *hole_base;
553 
554 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
555 					: k8_dhar_offset(pvt);
556 
557 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
558 		 pvt->mc_node_id, (unsigned long)*hole_base,
559 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
560 
561 	return 0;
562 }
563 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
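
/*
 * Illustrative example (values made up, not from any BKDG): if the BIOS
 * programs the DRAM hole base to 0xc0000000, then *hole_base = 0xc0000000
 * and *hole_size = 0x100000000 - 0xc0000000 = 0x40000000 (1 GB). The DRAM
 * behind the hole is accessed at [0x100000000, 0x100000000 + 0x40000000)
 * as shown in the diagram above, and *hole_offset comes from the
 * family-specific DHAR offset field.
 */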
564 
565 /*
566  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
567  * assumed that sys_addr maps to the node given by mci.
568  *
569  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
570  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
571  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
572  * then it is also involved in translating a SysAddr to a DramAddr. Sections
573  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
574  * These parts of the documentation are unclear. I interpret them as follows:
575  *
576  * When node n receives a SysAddr, it processes the SysAddr as follows:
577  *
578  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
579  *    Limit registers for node n. If the SysAddr is not within the range
580  *    specified by the base and limit values, then node n ignores the Sysaddr
581  *    (since it does not map to node n). Otherwise continue to step 2 below.
582  *
583  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
584  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
585  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
586  *    hole. If not, skip to step 3 below. Else get the value of the
587  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
588  *    offset defined by this value from the SysAddr.
589  *
590  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
591  *    Base register for node n. To obtain the DramAddr, subtract the base
592  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
593  */
594 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
595 {
596 	struct amd64_pvt *pvt = mci->pvt_info;
597 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
598 	int ret;
599 
600 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
601 
602 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
603 				      &hole_size);
604 	if (!ret) {
605 		if ((sys_addr >= (1ULL << 32)) &&
606 		    (sys_addr < ((1ULL << 32) + hole_size))) {
607 			/* use DHAR to translate SysAddr to DramAddr */
608 			dram_addr = sys_addr - hole_offset;
609 
610 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
611 				 (unsigned long)sys_addr,
612 				 (unsigned long)dram_addr);
613 
614 			return dram_addr;
615 		}
616 	}
617 
618 	/*
619 	 * Translate the SysAddr to a DramAddr as shown near the start of
620 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
621 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
622 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
623 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
624 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
625 	 * Programmer's Manual Volume 1 Application Programming.
626 	 */
627 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
628 
629 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
630 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
631 	return dram_addr;
632 }
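
/*
 * Continuing the illustrative DHAR example above: a SysAddr of 0x100001000
 * lies within [1ULL << 32, (1ULL << 32) + hole_size), so hole_offset is
 * subtracted to form the DramAddr; a SysAddr outside that window is instead
 * truncated to 40 bits and has dram_base subtracted.
 */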
633 
634 /*
635  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
636  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
637  * for node interleaving.
638  */
639 static int num_node_interleave_bits(unsigned intlv_en)
640 {
641 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
642 	int n;
643 
644 	BUG_ON(intlv_en > 7);
645 	n = intlv_shift_table[intlv_en];
646 	return n;
647 }
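
/*
 * For reference (read off the table above): intlv_en values 0x1, 0x3 and
 * 0x7 correspond to 2-, 4- and 8-node interleaving and yield 1, 2 and 3
 * interleave bits respectively; all other encodings map to 0.
 */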
648 
649 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
650 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
651 {
652 	struct amd64_pvt *pvt;
653 	int intlv_shift;
654 	u64 input_addr;
655 
656 	pvt = mci->pvt_info;
657 
658 	/*
659 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
660 	 * concerning translating a DramAddr to an InputAddr.
661 	 */
662 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
663 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
664 		      (dram_addr & 0xfff);
665 
666 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
667 		 intlv_shift, (unsigned long)dram_addr,
668 		 (unsigned long)input_addr);
669 
670 	return input_addr;
671 }
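
/*
 * Worked example of the formula above (address value made up): with
 * intlv_shift == 2, DramAddr 0x12345678 becomes
 * ((0x12345678 >> 2) & GENMASK_ULL(35, 12)) + 0x678 = 0x48d1000 + 0x678
 * = 0x48d1678, i.e. the two interleave bits at [13:12] are squeezed out
 * while the 4K page offset is preserved.
 */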
672 
673 /*
674  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
675  * assumed that @sys_addr maps to the node given by mci.
676  */
677 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
678 {
679 	u64 input_addr;
680 
681 	input_addr =
682 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
683 
684 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
685 		 (unsigned long)sys_addr, (unsigned long)input_addr);
686 
687 	return input_addr;
688 }
689 
690 /* Map the Error address to a PAGE and PAGE OFFSET. */
691 static inline void error_address_to_page_and_offset(u64 error_address,
692 						    struct err_info *err)
693 {
694 	err->page = (u32) (error_address >> PAGE_SHIFT);
695 	err->offset = ((u32) error_address) & ~PAGE_MASK;
696 }
697 
698 /*
699  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
700  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
701  * of a node that detected an ECC memory error.  mci represents the node that
702  * the error address maps to (possibly different from the node that detected
703  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
704  * error.
705  */
706 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
707 {
708 	int csrow;
709 
710 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
711 
712 	if (csrow == -1)
713 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
714 				  "address 0x%lx\n", (unsigned long)sys_addr);
715 	return csrow;
716 }
717 
718 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
719 
720 /*
721  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
722  * are ECC capable.
723  */
724 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
725 {
726 	unsigned long edac_cap = EDAC_FLAG_NONE;
727 	u8 bit;
728 
729 	if (pvt->umc) {
730 		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
731 
732 		for_each_umc(i) {
733 			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
734 				continue;
735 
736 			umc_en_mask |= BIT(i);
737 
738 			/* UMC Configuration bit 12 (DimmEccEn) */
739 			if (pvt->umc[i].umc_cfg & BIT(12))
740 				dimm_ecc_en_mask |= BIT(i);
741 		}
742 
743 		if (umc_en_mask == dimm_ecc_en_mask)
744 			edac_cap = EDAC_FLAG_SECDED;
745 	} else {
746 		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
747 			? 19
748 			: 17;
749 
750 		if (pvt->dclr0 & BIT(bit))
751 			edac_cap = EDAC_FLAG_SECDED;
752 	}
753 
754 	return edac_cap;
755 }
756 
757 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
758 
759 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
760 {
761 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
762 
763 	if (pvt->dram_type == MEM_LRDDR3) {
764 		u32 dcsm = pvt->csels[chan].csmasks[0];
765 		/*
766 		 * It's assumed all LRDIMMs in a DCT are going to be of
767 		 * same 'type' until proven otherwise. So, use a cs
768 		 * value of '0' here to get dcsm value.
769 		 */
770 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
771 	}
772 
773 	edac_dbg(1, "All DIMMs support ECC:%s\n",
774 		    (dclr & BIT(19)) ? "yes" : "no");
775 
776 
777 	edac_dbg(1, "  PAR/ERR parity: %s\n",
778 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
779 
780 	if (pvt->fam == 0x10)
781 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
782 			 (dclr & BIT(11)) ?  "128b" : "64b");
783 
784 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
785 		 (dclr & BIT(12)) ?  "yes" : "no",
786 		 (dclr & BIT(13)) ?  "yes" : "no",
787 		 (dclr & BIT(14)) ?  "yes" : "no",
788 		 (dclr & BIT(15)) ?  "yes" : "no");
789 }
790 
791 #define CS_EVEN_PRIMARY		BIT(0)
792 #define CS_ODD_PRIMARY		BIT(1)
793 #define CS_EVEN_SECONDARY	BIT(2)
794 #define CS_ODD_SECONDARY	BIT(3)
795 
796 #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
797 #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
798 
799 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
800 {
801 	int cs_mode = 0;
802 
803 	if (csrow_enabled(2 * dimm, ctrl, pvt))
804 		cs_mode |= CS_EVEN_PRIMARY;
805 
806 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
807 		cs_mode |= CS_ODD_PRIMARY;
808 
809 	/* Asymmetric dual-rank DIMM support. */
810 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
811 		cs_mode |= CS_ODD_SECONDARY;
812 
813 	return cs_mode;
814 }
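
/*
 * Example encoding (hypothetical DIMM population): a dual-rank DIMM 0 with
 * CS0 and CS1 enabled plus a secondary mask on CS1 yields
 * cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY | CS_ODD_SECONDARY = 0xb.
 */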
815 
816 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
817 {
818 	int dimm, size0, size1, cs0, cs1, cs_mode;
819 
820 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
821 
822 	for (dimm = 0; dimm < 2; dimm++) {
823 		cs0 = dimm * 2;
824 		cs1 = dimm * 2 + 1;
825 
826 		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
827 
828 		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
829 		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
830 
831 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
832 				cs0,	size0,
833 				cs1,	size1);
834 	}
835 }
836 
837 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
838 {
839 	struct amd64_umc *umc;
840 	u32 i, tmp, umc_base;
841 
842 	for_each_umc(i) {
843 		umc_base = get_umc_base(i);
844 		umc = &pvt->umc[i];
845 
846 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
847 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
848 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
849 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
850 
851 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
852 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
853 
854 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
855 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
856 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
857 
858 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
859 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
860 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
861 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
862 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
863 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
864 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
865 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
866 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
867 
868 		if (pvt->dram_type == MEM_LRDDR4) {
869 			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
870 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
871 					i, 1 << ((tmp >> 4) & 0x3));
872 		}
873 
874 		debug_display_dimm_sizes_df(pvt, i);
875 	}
876 
877 	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
878 		 pvt->dhar, dhar_base(pvt));
879 }
880 
881 /* Display and decode various NB registers for debug purposes. */
882 static void __dump_misc_regs(struct amd64_pvt *pvt)
883 {
884 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
885 
886 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
887 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
888 
889 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
890 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
891 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
892 
893 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
894 
895 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
896 
897 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
898 		 pvt->dhar, dhar_base(pvt),
899 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
900 				   : f10_dhar_offset(pvt));
901 
902 	debug_display_dimm_sizes(pvt, 0);
903 
904 	/* everything below this point is Fam10h and above */
905 	if (pvt->fam == 0xf)
906 		return;
907 
908 	debug_display_dimm_sizes(pvt, 1);
909 
910 	/* Only if NOT ganged does dclr1 have valid info */
911 	if (!dct_ganging_enabled(pvt))
912 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
913 }
914 
915 /* Display and decode various NB registers for debug purposes. */
916 static void dump_misc_regs(struct amd64_pvt *pvt)
917 {
918 	if (pvt->umc)
919 		__dump_misc_regs_df(pvt);
920 	else
921 		__dump_misc_regs(pvt);
922 
923 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
924 
925 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
926 }
927 
928 /*
929  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
930  */
931 static void prep_chip_selects(struct amd64_pvt *pvt)
932 {
933 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
934 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
935 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
936 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
937 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
938 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
939 	} else if (pvt->fam >= 0x17) {
940 		int umc;
941 
942 		for_each_umc(umc) {
943 			pvt->csels[umc].b_cnt = 4;
944 			pvt->csels[umc].m_cnt = 2;
945 		}
946 
947 	} else {
948 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
949 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
950 	}
951 }
952 
953 static void read_umc_base_mask(struct amd64_pvt *pvt)
954 {
955 	u32 umc_base_reg, umc_base_reg_sec;
956 	u32 umc_mask_reg, umc_mask_reg_sec;
957 	u32 base_reg, base_reg_sec;
958 	u32 mask_reg, mask_reg_sec;
959 	u32 *base, *base_sec;
960 	u32 *mask, *mask_sec;
961 	int cs, umc;
962 
963 	for_each_umc(umc) {
964 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
965 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
966 
967 		for_each_chip_select(cs, umc, pvt) {
968 			base = &pvt->csels[umc].csbases[cs];
969 			base_sec = &pvt->csels[umc].csbases_sec[cs];
970 
971 			base_reg = umc_base_reg + (cs * 4);
972 			base_reg_sec = umc_base_reg_sec + (cs * 4);
973 
974 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
975 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
976 					 umc, cs, *base, base_reg);
977 
978 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
979 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
980 					 umc, cs, *base_sec, base_reg_sec);
981 		}
982 
983 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
984 		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
985 
986 		for_each_chip_select_mask(cs, umc, pvt) {
987 			mask = &pvt->csels[umc].csmasks[cs];
988 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
989 
990 			mask_reg = umc_mask_reg + (cs * 4);
991 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
992 
993 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
994 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
995 					 umc, cs, *mask, mask_reg);
996 
997 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
998 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
999 					 umc, cs, *mask_sec, mask_reg_sec);
1000 		}
1001 	}
1002 }
1003 
1004 /*
1005  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1006  */
1007 static void read_dct_base_mask(struct amd64_pvt *pvt)
1008 {
1009 	int cs;
1010 
1011 	prep_chip_selects(pvt);
1012 
1013 	if (pvt->umc)
1014 		return read_umc_base_mask(pvt);
1015 
1016 	for_each_chip_select(cs, 0, pvt) {
1017 		int reg0   = DCSB0 + (cs * 4);
1018 		int reg1   = DCSB1 + (cs * 4);
1019 		u32 *base0 = &pvt->csels[0].csbases[cs];
1020 		u32 *base1 = &pvt->csels[1].csbases[cs];
1021 
1022 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1023 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
1024 				 cs, *base0, reg0);
1025 
1026 		if (pvt->fam == 0xf)
1027 			continue;
1028 
1029 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1030 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
1031 				 cs, *base1, (pvt->fam == 0x10) ? reg1
1032 							: reg0);
1033 	}
1034 
1035 	for_each_chip_select_mask(cs, 0, pvt) {
1036 		int reg0   = DCSM0 + (cs * 4);
1037 		int reg1   = DCSM1 + (cs * 4);
1038 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1039 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1040 
1041 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1042 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1043 				 cs, *mask0, reg0);
1044 
1045 		if (pvt->fam == 0xf)
1046 			continue;
1047 
1048 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1049 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1050 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
1051 							: reg0);
1052 	}
1053 }
1054 
1055 static void determine_memory_type(struct amd64_pvt *pvt)
1056 {
1057 	u32 dram_ctrl, dcsm;
1058 
1059 	switch (pvt->fam) {
1060 	case 0xf:
1061 		if (pvt->ext_model >= K8_REV_F)
1062 			goto ddr3;
1063 
1064 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1065 		return;
1066 
1067 	case 0x10:
1068 		if (pvt->dchr0 & DDR3_MODE)
1069 			goto ddr3;
1070 
1071 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1072 		return;
1073 
1074 	case 0x15:
1075 		if (pvt->model < 0x60)
1076 			goto ddr3;
1077 
1078 		/*
1079 		 * Model 0x60h needs special handling:
1080 		 *
1081 		 * We use a Chip Select value of '0' to obtain dcsm.
1082 		 * Theoretically, it is possible to populate LRDIMMs of different
1083 		 * 'Rank' value on a DCT. But this is not the common case. So,
1084 		 * it's reasonable to assume all DIMMs are going to be of same
1085 		 * 'type' until proven otherwise.
1086 		 */
1087 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1088 		dcsm = pvt->csels[0].csmasks[0];
1089 
1090 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1091 			pvt->dram_type = MEM_DDR4;
1092 		else if (pvt->dclr0 & BIT(16))
1093 			pvt->dram_type = MEM_DDR3;
1094 		else if (dcsm & 0x3)
1095 			pvt->dram_type = MEM_LRDDR3;
1096 		else
1097 			pvt->dram_type = MEM_RDDR3;
1098 
1099 		return;
1100 
1101 	case 0x16:
1102 		goto ddr3;
1103 
1104 	case 0x17:
1105 	case 0x18:
1106 		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1107 			pvt->dram_type = MEM_LRDDR4;
1108 		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1109 			pvt->dram_type = MEM_RDDR4;
1110 		else
1111 			pvt->dram_type = MEM_DDR4;
1112 		return;
1113 
1114 	default:
1115 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1116 		pvt->dram_type = MEM_EMPTY;
1117 	}
1118 	return;
1119 
1120 ddr3:
1121 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1122 }
1123 
1124 /* Get the number of DCT channels the memory controller is using. */
1125 static int k8_early_channel_count(struct amd64_pvt *pvt)
1126 {
1127 	int flag;
1128 
1129 	if (pvt->ext_model >= K8_REV_F)
1130 		/* RevF (NPT) and later */
1131 		flag = pvt->dclr0 & WIDTH_128;
1132 	else
1133 		/* RevE and earlier */
1134 		flag = pvt->dclr0 & REVE_WIDTH_128;
1135 
1136 	/* not used */
1137 	pvt->dclr1 = 0;
1138 
1139 	return (flag) ? 2 : 1;
1140 }
1141 
1142 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1143 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1144 {
1145 	u16 mce_nid = amd_get_nb_id(m->extcpu);
1146 	struct mem_ctl_info *mci;
1147 	u8 start_bit = 1;
1148 	u8 end_bit   = 47;
1149 	u64 addr;
1150 
1151 	mci = edac_mc_find(mce_nid);
1152 	if (!mci)
1153 		return 0;
1154 
1155 	pvt = mci->pvt_info;
1156 
1157 	if (pvt->fam == 0xf) {
1158 		start_bit = 3;
1159 		end_bit   = 39;
1160 	}
1161 
1162 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1163 
1164 	/*
1165 	 * Erratum 637 workaround
1166 	 */
1167 	if (pvt->fam == 0x15) {
1168 		u64 cc6_base, tmp_addr;
1169 		u32 tmp;
1170 		u8 intlv_en;
1171 
1172 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1173 			return addr;
1174 
1175 
1176 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1177 		intlv_en = tmp >> 21 & 0x7;
1178 
1179 		/* add [47:27] + 3 trailing bits */
1180 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1181 
1182 		/* reverse and add DramIntlvEn */
1183 		cc6_base |= intlv_en ^ 0x7;
1184 
1185 		/* pin at [47:24] */
1186 		cc6_base <<= 24;
1187 
1188 		if (!intlv_en)
1189 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1190 
1191 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1192 
1193 							/* faster log2 */
1194 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1195 
1196 		/* OR DramIntlvSel into bits [14:12] */
1197 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1198 
1199 		/* add remaining [11:0] bits from original MC4_ADDR */
1200 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1201 
1202 		return cc6_base | tmp_addr;
1203 	}
1204 
1205 	return addr;
1206 }
1207 
1208 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1209 						unsigned int device,
1210 						struct pci_dev *related)
1211 {
1212 	struct pci_dev *dev = NULL;
1213 
1214 	while ((dev = pci_get_device(vendor, device, dev))) {
1215 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1216 		    (dev->bus->number == related->bus->number) &&
1217 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1218 			break;
1219 	}
1220 
1221 	return dev;
1222 }
1223 
1224 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1225 {
1226 	struct amd_northbridge *nb;
1227 	struct pci_dev *f1 = NULL;
1228 	unsigned int pci_func;
1229 	int off = range << 3;
1230 	u32 llim;
1231 
1232 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1233 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1234 
1235 	if (pvt->fam == 0xf)
1236 		return;
1237 
1238 	if (!dram_rw(pvt, range))
1239 		return;
1240 
1241 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1242 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1243 
1244 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1245 	if (pvt->fam != 0x15)
1246 		return;
1247 
1248 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1249 	if (WARN_ON(!nb))
1250 		return;
1251 
1252 	if (pvt->model == 0x60)
1253 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1254 	else if (pvt->model == 0x30)
1255 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1256 	else
1257 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1258 
1259 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1260 	if (WARN_ON(!f1))
1261 		return;
1262 
1263 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1264 
1265 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1266 
1267 				    /* {[39:27],111b} */
1268 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1269 
1270 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1271 
1272 				    /* [47:40] */
1273 	pvt->ranges[range].lim.hi |= llim >> 13;
1274 
1275 	pci_dev_put(f1);
1276 }
1277 
1278 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1279 				    struct err_info *err)
1280 {
1281 	struct amd64_pvt *pvt = mci->pvt_info;
1282 
1283 	error_address_to_page_and_offset(sys_addr, err);
1284 
1285 	/*
1286 	 * Find out which node the error address belongs to. This may be
1287 	 * different from the node that detected the error.
1288 	 */
1289 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1290 	if (!err->src_mci) {
1291 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1292 			     (unsigned long)sys_addr);
1293 		err->err_code = ERR_NODE;
1294 		return;
1295 	}
1296 
1297 	/* Now map the sys_addr to a CSROW */
1298 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1299 	if (err->csrow < 0) {
1300 		err->err_code = ERR_CSROW;
1301 		return;
1302 	}
1303 
1304 	/* CHIPKILL enabled */
1305 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1306 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1307 		if (err->channel < 0) {
1308 			/*
1309 			 * Syndrome didn't map, so we don't know which of the
1310 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1311 			 * as suspect.
1312 			 */
1313 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1314 				      "possible error reporting race\n",
1315 				      err->syndrome);
1316 			err->err_code = ERR_CHANNEL;
1317 			return;
1318 		}
1319 	} else {
1320 		/*
1321 		 * non-chipkill ecc mode
1322 		 *
1323 		 * The k8 documentation is unclear about how to determine the
1324 		 * channel number when using non-chipkill memory.  This method
1325 		 * was obtained from email communication with someone at AMD.
1326 		 * (Wish the email was placed in this comment - norsk)
1327 		 */
1328 		err->channel = ((sys_addr & BIT(3)) != 0);
1329 	}
1330 }
1331 
1332 static int ddr2_cs_size(unsigned i, bool dct_width)
1333 {
1334 	unsigned shift = 0;
1335 
1336 	if (i <= 2)
1337 		shift = i;
1338 	else if (!(i & 0x1))
1339 		shift = i >> 1;
1340 	else
1341 		shift = (i + 1) >> 1;
1342 
1343 	return 128 << (shift + !!dct_width);
1344 }
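
/*
 * Tracing the helper above (illustrative): i = 5 is odd and > 2, so
 * shift = (5 + 1) >> 1 = 3; a 64-bit DCT (dct_width == false) then reports
 * 128 << 3 = 1024 MB, while a 128-bit DCT reports 128 << 4 = 2048 MB.
 */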
1345 
1346 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1347 				  unsigned cs_mode, int cs_mask_nr)
1348 {
1349 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1350 
1351 	if (pvt->ext_model >= K8_REV_F) {
1352 		WARN_ON(cs_mode > 11);
1353 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1354 	}
1355 	else if (pvt->ext_model >= K8_REV_D) {
1356 		unsigned diff;
1357 		WARN_ON(cs_mode > 10);
1358 
1359 		/*
1360 		 * the below calculation, besides trying to win an obfuscated C
1361 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1362 		 * mappings are:
1363 		 *
1364 		 * cs_mode	CS size (MB)
1365 		 * =======	============
1366 		 * 0		32
1367 		 * 1		64
1368 		 * 2		128
1369 		 * 3		128
1370 		 * 4		256
1371 		 * 5		512
1372 		 * 6		256
1373 		 * 7		512
1374 		 * 8		1024
1375 		 * 9		1024
1376 		 * 10		2048
1377 		 *
1378 		 * Basically, it calculates a value with which to shift the
1379 		 * smallest CS size of 32MB.
1380 		 *
1381 		 * ddr[23]_cs_size have a similar purpose.
1382 		 */
1383 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1384 
1385 		return 32 << (cs_mode - diff);
1386 	}
1387 	else {
1388 		WARN_ON(cs_mode > 6);
1389 		return 32 << cs_mode;
1390 	}
1391 }
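
/*
 * Spot-checking the K8_REV_D branch above against the table: cs_mode = 6
 * gives diff = 6/3 + 1 = 3 and 32 << (6 - 3) = 256 MB; cs_mode = 9 gives
 * diff = 3 + 1 = 4 and 32 << 5 = 1024 MB, matching the listed mappings.
 */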
1392 
1393 /*
1394  * Get the number of DCT channels in use.
1395  *
1396  * Return:
1397  *	number of Memory Channels in operation
1398  * Pass back:
1399  *	contents of the DCL0_LOW register
1400  */
1401 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1402 {
1403 	int i, j, channels = 0;
1404 
1405 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1406 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1407 		return 2;
1408 
1409 	/*
1410 	 * Need to check if in unganged mode: In such, there are 2 channels,
1411 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1412 	 * bit will be OFF.
1413 	 *
1414 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1415 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1416 	 */
1417 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1418 
1419 	/*
1420 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1421 	 * is more than just one DIMM present in unganged mode. Need to check
1422 	 * both controllers since DIMMs can be placed in either one.
1423 	 */
1424 	for (i = 0; i < 2; i++) {
1425 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1426 
1427 		for (j = 0; j < 4; j++) {
1428 			if (DBAM_DIMM(j, dbam) > 0) {
1429 				channels++;
1430 				break;
1431 			}
1432 		}
1433 	}
1434 
1435 	if (channels > 2)
1436 		channels = 2;
1437 
1438 	amd64_info("MCT channel count: %d\n", channels);
1439 
1440 	return channels;
1441 }
1442 
1443 static int f17_early_channel_count(struct amd64_pvt *pvt)
1444 {
1445 	int i, channels = 0;
1446 
1447 	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1448 	for_each_umc(i)
1449 		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1450 
1451 	amd64_info("MCT channel count: %d\n", channels);
1452 
1453 	return channels;
1454 }
1455 
1456 static int ddr3_cs_size(unsigned i, bool dct_width)
1457 {
1458 	unsigned shift = 0;
1459 	int cs_size = 0;
1460 
1461 	if (i == 0 || i == 3 || i == 4)
1462 		cs_size = -1;
1463 	else if (i <= 2)
1464 		shift = i;
1465 	else if (i == 12)
1466 		shift = 7;
1467 	else if (!(i & 0x1))
1468 		shift = i >> 1;
1469 	else
1470 		shift = (i + 1) >> 1;
1471 
1472 	if (cs_size != -1)
1473 		cs_size = (128 * (1 << !!dct_width)) << shift;
1474 
1475 	return cs_size;
1476 }
1477 
1478 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1479 {
1480 	unsigned shift = 0;
1481 	int cs_size = 0;
1482 
1483 	if (i < 4 || i == 6)
1484 		cs_size = -1;
1485 	else if (i == 12)
1486 		shift = 7;
1487 	else if (!(i & 0x1))
1488 		shift = i >> 1;
1489 	else
1490 		shift = (i + 1) >> 1;
1491 
1492 	if (cs_size != -1)
1493 		cs_size = rank_multiply * (128 << shift);
1494 
1495 	return cs_size;
1496 }
1497 
1498 static int ddr4_cs_size(unsigned i)
1499 {
1500 	int cs_size = 0;
1501 
1502 	if (i == 0)
1503 		cs_size = -1;
1504 	else if (i == 1)
1505 		cs_size = 1024;
1506 	else
1507 		/* Min cs_size = 1G */
1508 		cs_size = 1024 * (1 << (i >> 1));
1509 
1510 	return cs_size;
1511 }
1512 
1513 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1514 				   unsigned cs_mode, int cs_mask_nr)
1515 {
1516 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1517 
1518 	WARN_ON(cs_mode > 11);
1519 
1520 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1521 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1522 	else
1523 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1524 }
1525 
1526 /*
1527  * F15h supports only 64bit DCT interfaces
1528  */
1529 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1530 				   unsigned cs_mode, int cs_mask_nr)
1531 {
1532 	WARN_ON(cs_mode > 12);
1533 
1534 	return ddr3_cs_size(cs_mode, false);
1535 }
1536 
1537 /* F15h M60h supports DDR4 mapping as well. */
1538 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1539 					unsigned cs_mode, int cs_mask_nr)
1540 {
1541 	int cs_size;
1542 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1543 
1544 	WARN_ON(cs_mode > 12);
1545 
1546 	if (pvt->dram_type == MEM_DDR4) {
1547 		if (cs_mode > 9)
1548 			return -1;
1549 
1550 		cs_size = ddr4_cs_size(cs_mode);
1551 	} else if (pvt->dram_type == MEM_LRDDR3) {
1552 		unsigned rank_multiply = dcsm & 0xf;
1553 
1554 		if (rank_multiply == 3)
1555 			rank_multiply = 4;
1556 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1557 	} else {
1558 		/* Minimum cs size is 512MB for F15h M60h */
1559 		if (cs_mode == 0x1)
1560 			return -1;
1561 
1562 		cs_size = ddr3_cs_size(cs_mode, false);
1563 	}
1564 
1565 	return cs_size;
1566 }
1567 
1568 /*
1569  * F16h and F15h model 30h have only limited cs_modes.
1570  */
1571 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1572 				unsigned cs_mode, int cs_mask_nr)
1573 {
1574 	WARN_ON(cs_mode > 12);
1575 
1576 	if (cs_mode == 6 || cs_mode == 8 ||
1577 	    cs_mode == 9 || cs_mode == 12)
1578 		return -1;
1579 	else
1580 		return ddr3_cs_size(cs_mode, false);
1581 }
1582 
1583 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1584 				    unsigned int cs_mode, int csrow_nr)
1585 {
1586 	u32 addr_mask_orig, addr_mask_deinterleaved;
1587 	u32 msb, weight, num_zero_bits;
1588 	int dimm, size = 0;
1589 
1590 	/* No Chip Selects are enabled. */
1591 	if (!cs_mode)
1592 		return size;
1593 
1594 	/* Requested size of an even CS but none are enabled. */
1595 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1596 		return size;
1597 
1598 	/* Requested size of an odd CS but none are enabled. */
1599 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1600 		return size;
1601 
1602 	/*
1603 	 * There is one mask per DIMM, and two Chip Selects per DIMM.
1604 	 *	CS0 and CS1 -> DIMM0
1605 	 *	CS2 and CS3 -> DIMM1
1606 	 */
1607 	dimm = csrow_nr >> 1;
1608 
1609 	/* Asymmetric dual-rank DIMM support. */
1610 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1611 		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1612 	else
1613 		addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1614 
1615 	/*
1616 	 * The number of zero bits in the mask is equal to the number of bits
1617 	 * in a full mask minus the number of bits in the current mask.
1618 	 *
1619 	 * The MSB is the number of bits in the full mask because BIT[0] is
1620 	 * always 0.
1621 	 */
1622 	msb = fls(addr_mask_orig) - 1;
1623 	weight = hweight_long(addr_mask_orig);
1624 	num_zero_bits = msb - weight;
1625 
1626 	/* Take the number of zero bits off from the top of the mask. */
1627 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1628 
1629 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1630 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
1631 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1632 
1633 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
1634 	size = (addr_mask_deinterleaved >> 2) + 1;
1635 
1636 	/* Return size in MBs. */
1637 	return size >> 10;
1638 }
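
/*
 * Worked example of the deinterleaving above (mask value made up): an
 * addr_mask_orig of 0x07fffefe has msb = 26 and 25 set bits, so one zero
 * bit is dropped from the top and addr_mask_deinterleaved becomes
 * GENMASK_ULL(25, 1) = 0x03fffffe, giving a size of
 * (0x03fffffe >> 2) + 1 = 0x1000000 kB, i.e. 16384 MB per chip select.
 */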
1639 
1640 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1641 {
1642 
1643 	if (pvt->fam == 0xf)
1644 		return;
1645 
1646 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1647 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1648 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1649 
1650 		edac_dbg(0, "  DCTs operate in %s mode\n",
1651 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1652 
1653 		if (!dct_ganging_enabled(pvt))
1654 			edac_dbg(0, "  Address range split per DCT: %s\n",
1655 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1656 
1657 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1658 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1659 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1660 
1661 		edac_dbg(0, "  channel interleave: %s, "
1662 			 "interleave bits selector: 0x%x\n",
1663 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1664 			 dct_sel_interleave_addr(pvt));
1665 	}
1666 
1667 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1668 }
1669 
1670 /*
1671  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1672  * 2.10.12 Memory Interleaving Modes).
1673  */
1674 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1675 				     u8 intlv_en, int num_dcts_intlv,
1676 				     u32 dct_sel)
1677 {
1678 	u8 channel = 0;
1679 	u8 select;
1680 
1681 	if (!(intlv_en))
1682 		return (u8)(dct_sel);
1683 
1684 	if (num_dcts_intlv == 2) {
1685 		select = (sys_addr >> 8) & 0x3;
1686 		channel = select ? 0x3 : 0;
1687 	} else if (num_dcts_intlv == 4) {
1688 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1689 		switch (intlv_addr) {
1690 		case 0x4:
1691 			channel = (sys_addr >> 8) & 0x3;
1692 			break;
1693 		case 0x5:
1694 			channel = (sys_addr >> 9) & 0x3;
1695 			break;
1696 		}
1697 	}
1698 	return channel;
1699 }
1700 
1701 /*
1702  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1703  * Interleaving Modes.
1704  */
1705 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1706 				bool hi_range_sel, u8 intlv_en)
1707 {
1708 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1709 
1710 	if (dct_ganging_enabled(pvt))
1711 		return 0;
1712 
1713 	if (hi_range_sel)
1714 		return dct_sel_high;
1715 
1716 	/*
1717 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1718 	 */
1719 	if (dct_interleave_enabled(pvt)) {
1720 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1721 
1722 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1723 		if (!intlv_addr)
1724 			return sys_addr >> 6 & 1;
1725 
1726 		if (intlv_addr & 0x2) {
1727 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1728 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1729 
1730 			return ((sys_addr >> shift) & 1) ^ temp;
1731 		}
1732 
1733 		if (intlv_addr & 0x4) {
1734 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
1735 
1736 			return (sys_addr >> shift) & 1;
1737 		}
1738 
1739 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1740 	}
1741 
1742 	if (dct_high_range_enabled(pvt))
1743 		return ~dct_sel_high & 1;
1744 
1745 	return 0;
1746 }
1747 
1748 /* Convert the sys_addr to the normalized DCT address */
1749 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1750 				 u64 sys_addr, bool hi_rng,
1751 				 u32 dct_sel_base_addr)
1752 {
1753 	u64 chan_off;
1754 	u64 dram_base		= get_dram_base(pvt, range);
1755 	u64 hole_off		= f10_dhar_offset(pvt);
1756 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1757 
1758 	if (hi_rng) {
1759 		/*
1760 		 * if
1761 		 *	base address of high range is below 4 GB
1762 		 *	(bits [47:27] at [31:11])				&&
1763 		 *	DRAM address space on this DCT is hoisted above 4 GB	&&
1764 		 *	sys_addr >= 4 GB
1765 		 * then
1766 		 *	remove hole offset from sys_addr
1767 		 * else
1768 		 *	remove high range offset from sys_addr
1769 		 */
1770 		if ((!(dct_sel_base_addr >> 16) ||
1771 		     dct_sel_base_addr < dhar_base(pvt)) &&
1772 		    dhar_valid(pvt) &&
1773 		    (sys_addr >= BIT_64(32)))
1774 			chan_off = hole_off;
1775 		else
1776 			chan_off = dct_sel_base_off;
1777 	} else {
1778 		/*
1779 		 * if
1780 		 *	we have a valid hole		&&
1781 		 *	sys_addr >= 4 GB
1782 		 * then
1783 		 *	remove hole
1784 		 * else
1785 		 *	remove dram base to normalize to DCT address
1786 		 */
1787 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1788 			chan_off = hole_off;
1789 		else
1790 			chan_off = dram_base;
1791 	}
1792 
1793 	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
1794 }
1795 
1796 /*
1797  * Check whether the csrow passed in is marked as SPARED; if so, return the
1798  * new spare row.
1799  */
1800 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1801 {
1802 	int tmp_cs;
1803 
1804 	if (online_spare_swap_done(pvt, dct) &&
1805 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1806 
1807 		for_each_chip_select(tmp_cs, dct, pvt) {
1808 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1809 				csrow = tmp_cs;
1810 				break;
1811 			}
1812 		}
1813 	}
1814 	return csrow;
1815 }
1816 
1817 /*
1818  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1819  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1820  *
1821  * Return:
1822  *	-EINVAL:  NOT FOUND
1823  *	0..csrow = Chip-Select Row
1824  */
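/*
 * The match test below is mask-based: after inverting the CSMask returned by
 * get_cs_base_and_mask(), only the address bits outside that mask are
 * compared, i.e. a csrow claims in_addr when
 * (in_addr & ~CSMask) == (CSBase & ~CSMask).
 */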
1825 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1826 {
1827 	struct mem_ctl_info *mci;
1828 	struct amd64_pvt *pvt;
1829 	u64 cs_base, cs_mask;
1830 	int cs_found = -EINVAL;
1831 	int csrow;
1832 
1833 	mci = edac_mc_find(nid);
1834 	if (!mci)
1835 		return cs_found;
1836 
1837 	pvt = mci->pvt_info;
1838 
1839 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1840 
1841 	for_each_chip_select(csrow, dct, pvt) {
1842 		if (!csrow_enabled(csrow, dct, pvt))
1843 			continue;
1844 
1845 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1846 
1847 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1848 			 csrow, cs_base, cs_mask);
1849 
1850 		cs_mask = ~cs_mask;
1851 
1852 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1853 			 (in_addr & cs_mask), (cs_base & cs_mask));
1854 
1855 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1856 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1857 				cs_found =  csrow;
1858 				break;
1859 			}
1860 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1861 
1862 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1863 			break;
1864 		}
1865 	}
1866 	return cs_found;
1867 }
1868 
1869 /*
1870  * See F2x10C. Non-interleaved graphics framebuffer memory below the 16G
1871  * boundary is swapped with a region located at the bottom of memory so that
1872  * the GPU can use the interleaved region and thus two channels.
1873  */
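/*
 * Field layout as used below (F2x10C, in 128 MB granularity since addresses
 * are compared after 'sys_addr >> 27'): bit 0 enables the swap, bits [9:3]
 * hold the swap base, bits [17:11] the swap limit and bits [26:20] the
 * region size. With a hypothetical swap_base of 0x10, the swapped window
 * starts at 0x10 * 128 MB = 2 GB.
 */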
1874 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1875 {
1876 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1877 
1878 	if (pvt->fam == 0x10) {
1879 		/* only revC3 and revE have that feature */
1880 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1881 			return sys_addr;
1882 	}
1883 
1884 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1885 
1886 	if (!(swap_reg & 0x1))
1887 		return sys_addr;
1888 
1889 	swap_base	= (swap_reg >> 3) & 0x7f;
1890 	swap_limit	= (swap_reg >> 11) & 0x7f;
1891 	rgn_size	= (swap_reg >> 20) & 0x7f;
1892 	tmp_addr	= sys_addr >> 27;
1893 
1894 	if (!(sys_addr >> 34) &&
1895 	    (((tmp_addr >= swap_base) &&
1896 	     (tmp_addr <= swap_limit)) ||
1897 	     (tmp_addr < rgn_size)))
1898 		return sys_addr ^ (u64)swap_base << 27;
1899 
1900 	return sys_addr;
1901 }
1902 
1903 /* For a given @dram_range, check if @sys_addr falls within it. */
1904 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1905 				  u64 sys_addr, int *chan_sel)
1906 {
1907 	int cs_found = -EINVAL;
1908 	u64 chan_addr;
1909 	u32 dct_sel_base;
1910 	u8 channel;
1911 	bool high_range = false;
1912 
1913 	u8 node_id    = dram_dst_node(pvt, range);
1914 	u8 intlv_en   = dram_intlv_en(pvt, range);
1915 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1916 
1917 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1918 		 range, sys_addr, get_dram_limit(pvt, range));
1919 
1920 	if (dhar_valid(pvt) &&
1921 	    dhar_base(pvt) <= sys_addr &&
1922 	    sys_addr < BIT_64(32)) {
1923 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1924 			    sys_addr);
1925 		return -EINVAL;
1926 	}
1927 
1928 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1929 		return -EINVAL;
1930 
1931 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1932 
1933 	dct_sel_base = dct_sel_baseaddr(pvt);
1934 
1935 	/*
1936 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1937 	 * select between DCT0 and DCT1.
1938 	 */
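	/*
	 * Both sides of the comparison below are in 128 MB units:
	 * 'sys_addr >> 27' drops the low 27 address bits, and dct_sel_base
	 * holds DctSelBaseAddr[47:27] in register bits [31:11], hence the
	 * '>> 11'. E.g. a hypothetical dct_sel_base of 0x20 << 11 places the
	 * DCT1 range at 0x20 * 128 MB = 4 GB.
	 */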
1939 	if (dct_high_range_enabled(pvt) &&
1940 	   !dct_ganging_enabled(pvt) &&
1941 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1942 		high_range = true;
1943 
1944 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1945 
1946 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1947 					  high_range, dct_sel_base);
1948 
1949 	/* Remove node interleaving, see F1x120 */
1950 	if (intlv_en)
1951 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1952 			    (chan_addr & 0xfff);
1953 
1954 	/* remove channel interleave */
1955 	if (dct_interleave_enabled(pvt) &&
1956 	   !dct_high_range_enabled(pvt) &&
1957 	   !dct_ganging_enabled(pvt)) {
1958 
1959 		if (dct_sel_interleave_addr(pvt) != 1) {
1960 			if (dct_sel_interleave_addr(pvt) == 0x3)
1961 				/* hash 9 */
1962 				chan_addr = ((chan_addr >> 10) << 9) |
1963 					     (chan_addr & 0x1ff);
1964 			else
1965 				/* A[6] or hash 6 */
1966 				chan_addr = ((chan_addr >> 7) << 6) |
1967 					     (chan_addr & 0x3f);
1968 		} else
1969 			/* A[12] */
1970 			chan_addr = ((chan_addr >> 13) << 12) |
1971 				     (chan_addr & 0xfff);
1972 	}
1973 
1974 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1975 
1976 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1977 
1978 	if (cs_found >= 0)
1979 		*chan_sel = channel;
1980 
1981 	return cs_found;
1982 }
1983 
1984 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1985 					u64 sys_addr, int *chan_sel)
1986 {
1987 	int cs_found = -EINVAL;
1988 	int num_dcts_intlv = 0;
1989 	u64 chan_addr, chan_offset;
1990 	u64 dct_base, dct_limit;
1991 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1992 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1993 
1994 	u64 dhar_offset		= f10_dhar_offset(pvt);
1995 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1996 	u8 node_id		= dram_dst_node(pvt, range);
1997 	u8 intlv_en		= dram_intlv_en(pvt, range);
1998 
1999 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2000 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2001 
2002 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2003 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
2004 
2005 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2006 		 range, sys_addr, get_dram_limit(pvt, range));
2007 
2008 	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
2009 	    !(get_dram_limit(pvt, range) >= sys_addr))
2010 		return -EINVAL;
2011 
2012 	if (dhar_valid(pvt) &&
2013 	    dhar_base(pvt) <= sys_addr &&
2014 	    sys_addr < BIT_64(32)) {
2015 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2016 			    sys_addr);
2017 		return -EINVAL;
2018 	}
2019 
2020 	/* Verify sys_addr is within DCT Range. */
2021 	dct_base = (u64) dct_sel_baseaddr(pvt);
2022 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2023 
2024 	if (!(dct_cont_base_reg & BIT(0)) &&
2025 	    !(dct_base <= (sys_addr >> 27) &&
2026 	      dct_limit >= (sys_addr >> 27)))
2027 		return -EINVAL;
2028 
2029 	/* Verify the number of DCTs that participate in channel interleaving. */
2030 	num_dcts_intlv = (int) hweight8(intlv_en);
2031 
2032 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2033 		return -EINVAL;
2034 
2035 	if (pvt->model >= 0x60)
2036 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2037 	else
2038 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2039 						     num_dcts_intlv, dct_sel);
2040 
2041 	/* Verify we stay within the MAX number of channels allowed */
2042 	if (channel > 3)
2043 		return -EINVAL;
2044 
2045 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2046 
2047 	/* Get normalized DCT addr */
2048 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2049 		chan_offset = dhar_offset;
2050 	else
2051 		chan_offset = dct_base << 27;
2052 
2053 	chan_addr = sys_addr - chan_offset;
2054 
2055 	/* remove channel interleave */
2056 	if (num_dcts_intlv == 2) {
2057 		if (intlv_addr == 0x4)
2058 			chan_addr = ((chan_addr >> 9) << 8) |
2059 						(chan_addr & 0xff);
2060 		else if (intlv_addr == 0x5)
2061 			chan_addr = ((chan_addr >> 10) << 9) |
2062 						(chan_addr & 0x1ff);
2063 		else
2064 			return -EINVAL;
2065 
2066 	} else if (num_dcts_intlv == 4) {
2067 		if (intlv_addr == 0x4)
2068 			chan_addr = ((chan_addr >> 10) << 8) |
2069 							(chan_addr & 0xff);
2070 		else if (intlv_addr == 0x5)
2071 			chan_addr = ((chan_addr >> 11) << 9) |
2072 							(chan_addr & 0x1ff);
2073 		else
2074 			return -EINVAL;
2075 	}
2076 
2077 	if (dct_offset_en) {
2078 		amd64_read_pci_cfg(pvt->F1,
2079 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2080 				   &tmp);
2081 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2082 	}
2083 
2084 	f15h_select_dct(pvt, channel);
2085 
2086 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2087 
2088 	/*
2089 	 * Find Chip select:
2090 	 * if channel == 3, alias it to 1. This is because, on F15h M30h,
2091 	 * there is support for 4 DCTs, but only 2 are currently functional.
2092 	 * They are DCT0 and DCT3, and all registers of DCT3 have been read
2093 	 * into pvt->csels[1], so '1' must be used here to get the correct info.
2094 	 * Refer to F15h M30h BKDG Sections 2.10 and 2.10.3 for clarification.
2095 	 */
2096 	alias_channel =  (channel == 3) ? 1 : channel;
2097 
2098 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2099 
2100 	if (cs_found >= 0)
2101 		*chan_sel = alias_channel;
2102 
2103 	return cs_found;
2104 }
2105 
2106 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2107 					u64 sys_addr,
2108 					int *chan_sel)
2109 {
2110 	int cs_found = -EINVAL;
2111 	unsigned range;
2112 
2113 	for (range = 0; range < DRAM_RANGES; range++) {
2114 		if (!dram_rw(pvt, range))
2115 			continue;
2116 
2117 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2118 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2119 							       sys_addr,
2120 							       chan_sel);
2121 
2122 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2123 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2124 			cs_found = f1x_match_to_this_node(pvt, range,
2125 							  sys_addr, chan_sel);
2126 			if (cs_found >= 0)
2127 				break;
2128 		}
2129 	}
2130 	return cs_found;
2131 }
2132 
2133 /*
2134  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2135  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2136  *
2137  * The @sys_addr is usually an error address received from the hardware
2138  * (MCX_ADDR).
2139  */
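/*
 * The decode below is a two-step walk: f1x_translate_sysaddr_to_cs() scans
 * the DRAM ranges and, for the matching one, derives the DCT (channel) and
 * the normalized DCT address; f1x_lookup_addr_in_dct() then matches that
 * address against the chip-select base/mask pairs to yield the csrow.
 */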
2140 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2141 				     struct err_info *err)
2142 {
2143 	struct amd64_pvt *pvt = mci->pvt_info;
2144 
2145 	error_address_to_page_and_offset(sys_addr, err);
2146 
2147 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2148 	if (err->csrow < 0) {
2149 		err->err_code = ERR_CSROW;
2150 		return;
2151 	}
2152 
2153 	/*
2154 	 * We need the syndromes for channel detection only when we're
2155 	 * ganged. Otherwise @chan should already contain the channel at
2156 	 * this point.
2157 	 */
2158 	if (dct_ganging_enabled(pvt))
2159 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2160 }
2161 
2162 /*
2163  * Debug routine to display the memory sizes of all logical DIMMs and their
2164  * CSROWs.
2165  */
2166 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2167 {
2168 	int dimm, size0, size1;
2169 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2170 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
2171 
2172 	if (pvt->fam == 0xf) {
2173 		/* K8 families < revF not supported yet */
2174 		if (pvt->ext_model < K8_REV_F)
2175 			return;
2176 
2177 		WARN_ON(ctrl != 0);
2178 	}
2179 
2180 	if (pvt->fam == 0x10) {
2181 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2182 							   : pvt->dbam0;
2183 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2184 				 pvt->csels[1].csbases :
2185 				 pvt->csels[0].csbases;
2186 	} else if (ctrl) {
2187 		dbam = pvt->dbam0;
2188 		dcsb = pvt->csels[1].csbases;
2189 	}
2190 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2191 		 ctrl, dbam);
2192 
2193 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2194 
2195 	/* Dump memory sizes for DIMM and its CSROWs */
2196 	for (dimm = 0; dimm < 4; dimm++) {
2197 
2198 		size0 = 0;
2199 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2200 			/*
2201 			 * For F15m60h, we need multiplier for LRDIMM cs_size
2202 			 * calculation. We pass dimm value to the dbam_to_cs
2203 			 * mapper so we can find the multiplier from the
2204 			 * corresponding DCSM.
2205 			 */
2206 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2207 						     DBAM_DIMM(dimm, dbam),
2208 						     dimm);
2209 
2210 		size1 = 0;
2211 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2212 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2213 						     DBAM_DIMM(dimm, dbam),
2214 						     dimm);
2215 
2216 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2217 				dimm * 2,     size0,
2218 				dimm * 2 + 1, size1);
2219 	}
2220 }
2221 
2222 static struct amd64_family_type family_types[] = {
2223 	[K8_CPUS] = {
2224 		.ctl_name = "K8",
2225 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2226 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2227 		.ops = {
2228 			.early_channel_count	= k8_early_channel_count,
2229 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
2230 			.dbam_to_cs		= k8_dbam_to_chip_select,
2231 		}
2232 	},
2233 	[F10_CPUS] = {
2234 		.ctl_name = "F10h",
2235 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2236 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2237 		.ops = {
2238 			.early_channel_count	= f1x_early_channel_count,
2239 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2240 			.dbam_to_cs		= f10_dbam_to_chip_select,
2241 		}
2242 	},
2243 	[F15_CPUS] = {
2244 		.ctl_name = "F15h",
2245 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2246 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2247 		.ops = {
2248 			.early_channel_count	= f1x_early_channel_count,
2249 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2250 			.dbam_to_cs		= f15_dbam_to_chip_select,
2251 		}
2252 	},
2253 	[F15_M30H_CPUS] = {
2254 		.ctl_name = "F15h_M30h",
2255 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2256 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2257 		.ops = {
2258 			.early_channel_count	= f1x_early_channel_count,
2259 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2260 			.dbam_to_cs		= f16_dbam_to_chip_select,
2261 		}
2262 	},
2263 	[F15_M60H_CPUS] = {
2264 		.ctl_name = "F15h_M60h",
2265 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2266 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2267 		.ops = {
2268 			.early_channel_count	= f1x_early_channel_count,
2269 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2270 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
2271 		}
2272 	},
2273 	[F16_CPUS] = {
2274 		.ctl_name = "F16h",
2275 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2276 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2277 		.ops = {
2278 			.early_channel_count	= f1x_early_channel_count,
2279 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2280 			.dbam_to_cs		= f16_dbam_to_chip_select,
2281 		}
2282 	},
2283 	[F16_M30H_CPUS] = {
2284 		.ctl_name = "F16h_M30h",
2285 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2286 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2287 		.ops = {
2288 			.early_channel_count	= f1x_early_channel_count,
2289 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2290 			.dbam_to_cs		= f16_dbam_to_chip_select,
2291 		}
2292 	},
2293 	[F17_CPUS] = {
2294 		.ctl_name = "F17h",
2295 		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2296 		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2297 		.ops = {
2298 			.early_channel_count	= f17_early_channel_count,
2299 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2300 		}
2301 	},
2302 	[F17_M10H_CPUS] = {
2303 		.ctl_name = "F17h_M10h",
2304 		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2305 		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2306 		.ops = {
2307 			.early_channel_count	= f17_early_channel_count,
2308 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2309 		}
2310 	},
2311 	[F17_M30H_CPUS] = {
2312 		.ctl_name = "F17h_M30h",
2313 		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2314 		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2315 		.ops = {
2316 			.early_channel_count	= f17_early_channel_count,
2317 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2318 		}
2319 	},
2320 	[F17_M70H_CPUS] = {
2321 		.ctl_name = "F17h_M70h",
2322 		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2323 		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2324 		.ops = {
2325 			.early_channel_count	= f17_early_channel_count,
2326 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2327 		}
2328 	},
2329 };
2330 
2331 /*
2332  * These are tables of eigenvectors (one per line) which can be used for the
2333  * construction of the syndrome tables. The modified syndrome search algorithm
2334  * uses those to find the symbol in error and thus the DIMM.
2335  *
2336  * Algorithm courtesy of Ross LaFetra from AMD.
2337  */
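/*
 * decode_syndrome() below treats each group of v_dim vector entries as the
 * rows belonging to one error symbol: it walks the 16 syndrome bits, XORs out
 * matching eigenvector components, and reports the symbol index whose vectors
 * reduce the syndrome to zero. map_err_sym_to_channel() then folds that
 * symbol number into a DDR channel.
 */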
2338 static const u16 x4_vectors[] = {
2339 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2340 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2341 	0x0001, 0x0002, 0x0004, 0x0008,
2342 	0x1013, 0x3032, 0x4044, 0x8088,
2343 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2344 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2345 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2346 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2347 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2348 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2349 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2350 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2351 	0x2b87, 0x164e, 0x642c, 0xdc18,
2352 	0x40b9, 0x80de, 0x1094, 0x20e8,
2353 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2354 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2355 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2356 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2357 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2358 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2359 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2360 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2361 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2362 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2363 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2364 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2365 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2366 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2367 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2368 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2369 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2370 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2371 	0x4807, 0xc40e, 0x130c, 0x3208,
2372 	0x1905, 0x2e0a, 0x5804, 0xac08,
2373 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2374 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2375 };
2376 
2377 static const u16 x8_vectors[] = {
2378 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2379 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2380 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2381 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2382 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2383 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2384 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2385 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2386 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2387 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2388 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2389 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2390 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2391 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2392 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2393 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2394 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2395 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2396 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2397 };
2398 
2399 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2400 			   unsigned v_dim)
2401 {
2402 	unsigned int i, err_sym;
2403 
2404 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2405 		u16 s = syndrome;
2406 		unsigned v_idx =  err_sym * v_dim;
2407 		unsigned v_end = (err_sym + 1) * v_dim;
2408 
2409 		/* walk over all 16 bits of the syndrome */
2410 		for (i = 1; i < (1U << 16); i <<= 1) {
2411 
2412 			/* if bit is set in that eigenvector... */
2413 			if (v_idx < v_end && vectors[v_idx] & i) {
2414 				u16 ev_comp = vectors[v_idx++];
2415 
2416 				/* ... and bit set in the modified syndrome, */
2417 				if (s & i) {
2418 					/* remove it. */
2419 					s ^= ev_comp;
2420 
2421 					if (!s)
2422 						return err_sym;
2423 				}
2424 
2425 			} else if (s & i)
2426 				/* can't get to zero, move to next symbol */
2427 				break;
2428 		}
2429 	}
2430 
2431 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2432 	return -1;
2433 }
2434 
2435 static int map_err_sym_to_channel(int err_sym, int sym_size)
2436 {
2437 	if (sym_size == 4)
2438 		switch (err_sym) {
2439 		case 0x20:
2440 		case 0x21:
2441 			return 0;
2442 			break;
2443 		case 0x22:
2444 		case 0x23:
2445 			return 1;
2446 			break;
2447 		default:
2448 			return err_sym >> 4;
2449 			break;
2450 		}
2451 	/* x8 symbols */
2452 	else
2453 		switch (err_sym) {
2454 		/* imaginary bits not in a DIMM */
2455 		case 0x10:
2456 			WARN(1, "Invalid error symbol: 0x%x\n",
2457 			     err_sym);
2458 			return -1;
2459 			break;
2460 
2461 		case 0x11:
2462 			return 0;
2463 			break;
2464 		case 0x12:
2465 			return 1;
2466 			break;
2467 		default:
2468 			return err_sym >> 3;
2469 			break;
2470 		}
2471 	return -1;
2472 }
2473 
2474 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2475 {
2476 	struct amd64_pvt *pvt = mci->pvt_info;
2477 	int err_sym = -1;
2478 
2479 	if (pvt->ecc_sym_sz == 8)
2480 		err_sym = decode_syndrome(syndrome, x8_vectors,
2481 					  ARRAY_SIZE(x8_vectors),
2482 					  pvt->ecc_sym_sz);
2483 	else if (pvt->ecc_sym_sz == 4)
2484 		err_sym = decode_syndrome(syndrome, x4_vectors,
2485 					  ARRAY_SIZE(x4_vectors),
2486 					  pvt->ecc_sym_sz);
2487 	else {
2488 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2489 		return err_sym;
2490 	}
2491 
2492 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2493 }
2494 
2495 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2496 			    u8 ecc_type)
2497 {
2498 	enum hw_event_mc_err_type err_type;
2499 	const char *string;
2500 
2501 	if (ecc_type == 2)
2502 		err_type = HW_EVENT_ERR_CORRECTED;
2503 	else if (ecc_type == 1)
2504 		err_type = HW_EVENT_ERR_UNCORRECTED;
2505 	else if (ecc_type == 3)
2506 		err_type = HW_EVENT_ERR_DEFERRED;
2507 	else {
2508 		WARN(1, "Something is rotten in the state of Denmark.\n");
2509 		return;
2510 	}
2511 
2512 	switch (err->err_code) {
2513 	case DECODE_OK:
2514 		string = "";
2515 		break;
2516 	case ERR_NODE:
2517 		string = "Failed to map error addr to a node";
2518 		break;
2519 	case ERR_CSROW:
2520 		string = "Failed to map error addr to a csrow";
2521 		break;
2522 	case ERR_CHANNEL:
2523 		string = "Unknown syndrome - possible error reporting race";
2524 		break;
2525 	case ERR_SYND:
2526 		string = "MCA_SYND not valid - unknown syndrome and csrow";
2527 		break;
2528 	case ERR_NORM_ADDR:
2529 		string = "Cannot decode normalized address";
2530 		break;
2531 	default:
2532 		string = "WTF error";
2533 		break;
2534 	}
2535 
2536 	edac_mc_handle_error(err_type, mci, 1,
2537 			     err->page, err->offset, err->syndrome,
2538 			     err->csrow, err->channel, -1,
2539 			     string, "");
2540 }
2541 
2542 static inline void decode_bus_error(int node_id, struct mce *m)
2543 {
2544 	struct mem_ctl_info *mci;
2545 	struct amd64_pvt *pvt;
2546 	u8 ecc_type = (m->status >> 45) & 0x3;
2547 	u8 xec = XEC(m->status, 0x1f);
2548 	u16 ec = EC(m->status);
2549 	u64 sys_addr;
2550 	struct err_info err;
2551 
2552 	mci = edac_mc_find(node_id);
2553 	if (!mci)
2554 		return;
2555 
2556 	pvt = mci->pvt_info;
2557 
2558 	/* Bail out early if this was an 'observed' error */
2559 	if (PP(ec) == NBSL_PP_OBS)
2560 		return;
2561 
2562 	/* Do only ECC errors */
2563 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2564 		return;
2565 
2566 	memset(&err, 0, sizeof(err));
2567 
2568 	sys_addr = get_error_address(pvt, m);
2569 
2570 	if (ecc_type == 2)
2571 		err.syndrome = extract_syndrome(m->status);
2572 
2573 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2574 
2575 	__log_ecc_error(mci, &err, ecc_type);
2576 }
2577 
2578 /*
2579  * To find the UMC channel represented by this bank we need to match on its
2580  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2581  * IPID.
2582  *
2583  * Currently, we can derive the channel number by looking at the 6th nibble in
2584  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2585  * number.
2586  */
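/*
 * Example with a hypothetical IPID: instance_id = 0x00150000 gives
 * (0x00150000 & GENMASK(31, 0)) >> 20 = 0x1, i.e. UMC channel 1.
 */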
2587 static int find_umc_channel(struct mce *m)
2588 {
2589 	return (m->ipid & GENMASK(31, 0)) >> 20;
2590 }
2591 
2592 static void decode_umc_error(int node_id, struct mce *m)
2593 {
2594 	u8 ecc_type = (m->status >> 45) & 0x3;
2595 	struct mem_ctl_info *mci;
2596 	struct amd64_pvt *pvt;
2597 	struct err_info err;
2598 	u64 sys_addr;
2599 
2600 	mci = edac_mc_find(node_id);
2601 	if (!mci)
2602 		return;
2603 
2604 	pvt = mci->pvt_info;
2605 
2606 	memset(&err, 0, sizeof(err));
2607 
2608 	if (m->status & MCI_STATUS_DEFERRED)
2609 		ecc_type = 3;
2610 
2611 	err.channel = find_umc_channel(m);
2612 
2613 	if (!(m->status & MCI_STATUS_SYNDV)) {
2614 		err.err_code = ERR_SYND;
2615 		goto log_error;
2616 	}
2617 
2618 	if (ecc_type == 2) {
2619 		u8 length = (m->synd >> 18) & 0x3f;
2620 
2621 		if (length)
2622 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2623 		else
2624 			err.err_code = ERR_CHANNEL;
2625 	}
2626 
2627 	err.csrow = m->synd & 0x7;
2628 
2629 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2630 		err.err_code = ERR_NORM_ADDR;
2631 		goto log_error;
2632 	}
2633 
2634 	error_address_to_page_and_offset(sys_addr, &err);
2635 
2636 log_error:
2637 	__log_ecc_error(mci, &err, ecc_type);
2638 }
2639 
2640 /*
2641  * Use pvt->F3, which contains the F3 CPU PCI device, to get the related
2642  * F1 (AddrMap) and F2 (DCT) devices. Return a negative value on error.
2643  * Reserve F0 and F6 on systems with a UMC.
2644  */
2645 static int
2646 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2647 {
2648 	if (pvt->umc) {
2649 		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2650 		if (!pvt->F0) {
2651 			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2652 			return -ENODEV;
2653 		}
2654 
2655 		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2656 		if (!pvt->F6) {
2657 			pci_dev_put(pvt->F0);
2658 			pvt->F0 = NULL;
2659 
2660 			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2661 			return -ENODEV;
2662 		}
2663 
2664 		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2665 		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2666 		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2667 
2668 		return 0;
2669 	}
2670 
2671 	/* Reserve the ADDRESS MAP Device */
2672 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2673 	if (!pvt->F1) {
2674 		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2675 		return -ENODEV;
2676 	}
2677 
2678 	/* Reserve the DCT Device */
2679 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2680 	if (!pvt->F2) {
2681 		pci_dev_put(pvt->F1);
2682 		pvt->F1 = NULL;
2683 
2684 		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2685 		return -ENODEV;
2686 	}
2687 
2688 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2689 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2690 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2691 
2692 	return 0;
2693 }
2694 
2695 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2696 {
2697 	if (pvt->umc) {
2698 		pci_dev_put(pvt->F0);
2699 		pci_dev_put(pvt->F6);
2700 	} else {
2701 		pci_dev_put(pvt->F1);
2702 		pci_dev_put(pvt->F2);
2703 	}
2704 }
2705 
2706 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2707 {
2708 	pvt->ecc_sym_sz = 4;
2709 
2710 	if (pvt->umc) {
2711 		u8 i;
2712 
2713 		for_each_umc(i) {
2714 			/* Check enabled channels only: */
2715 			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2716 				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2717 					pvt->ecc_sym_sz = 16;
2718 					return;
2719 				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2720 					pvt->ecc_sym_sz = 8;
2721 					return;
2722 				}
2723 			}
2724 		}
2725 	} else if (pvt->fam >= 0x10) {
2726 		u32 tmp;
2727 
2728 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2729 		/* F16h has only DCT0, so no need to read dbam1. */
2730 		if (pvt->fam != 0x16)
2731 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2732 
2733 		/* F10h, revD and later can do x8 ECC too. */
2734 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2735 			pvt->ecc_sym_sz = 8;
2736 	}
2737 }
2738 
2739 /*
2740  * Retrieve the hardware registers of the memory controller.
2741  */
2742 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2743 {
2744 	u8 nid = pvt->mc_node_id;
2745 	struct amd64_umc *umc;
2746 	u32 i, umc_base;
2747 
2748 	/* Read registers from each UMC */
2749 	for_each_umc(i) {
2750 
2751 		umc_base = get_umc_base(i);
2752 		umc = &pvt->umc[i];
2753 
2754 		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2755 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2756 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2757 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2758 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2759 	}
2760 }
2761 
2762 /*
2763  * Retrieve the hardware registers of the memory controller (this includes the
2764  * 'Address Map' and 'Misc' device regs)
2765  */
2766 static void read_mc_regs(struct amd64_pvt *pvt)
2767 {
2768 	unsigned int range;
2769 	u64 msr_val;
2770 
2771 	/*
2772 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2773 	 * those are Read-As-Zero.
2774 	 */
2775 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2776 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2777 
2778 	/* Check first whether TOP_MEM2 is enabled: */
2779 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2780 	if (msr_val & BIT(21)) {
2781 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2782 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2783 	} else {
2784 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2785 	}
2786 
2787 	if (pvt->umc) {
2788 		__read_mc_regs_df(pvt);
2789 		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2790 
2791 		goto skip;
2792 	}
2793 
2794 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2795 
2796 	read_dram_ctl_register(pvt);
2797 
2798 	for (range = 0; range < DRAM_RANGES; range++) {
2799 		u8 rw;
2800 
2801 		/* read settings for this DRAM range */
2802 		read_dram_base_limit_regs(pvt, range);
2803 
2804 		rw = dram_rw(pvt, range);
2805 		if (!rw)
2806 			continue;
2807 
2808 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2809 			 range,
2810 			 get_dram_base(pvt, range),
2811 			 get_dram_limit(pvt, range));
2812 
2813 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2814 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2815 			 (rw & 0x1) ? "R" : "-",
2816 			 (rw & 0x2) ? "W" : "-",
2817 			 dram_intlv_sel(pvt, range),
2818 			 dram_dst_node(pvt, range));
2819 	}
2820 
2821 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2822 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2823 
2824 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2825 
2826 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2827 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2828 
2829 	if (!dct_ganging_enabled(pvt)) {
2830 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2831 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2832 	}
2833 
2834 skip:
2835 	read_dct_base_mask(pvt);
2836 
2837 	determine_memory_type(pvt);
2838 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2839 
2840 	determine_ecc_sym_sz(pvt);
2841 
2842 	dump_misc_regs(pvt);
2843 }
2844 
2845 /*
2846  * NOTE: CPU Revision Dependent code
2847  *
2848  * Input:
2849  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2850  *	k8 private pointer to -->
2851  *			DRAM Bank Address mapping register
2852  *			node_id
2853  *			DCL register from which dual_channel_active is read
2854  *
2855  * The DBAM register consists of 4 sets of 4 bits each, defined as follows:
2856  *
2857  * Bits:	CSROWs
2858  * 0-3		CSROWs 0 and 1
2859  * 4-7		CSROWs 2 and 3
2860  * 8-11		CSROWs 4 and 5
2861  * 12-15	CSROWs 6 and 7
2862  *
2863  * Values range from 0 to 15.
2864  * The meaning of the values depends on CPU revision and dual-channel state;
2865  * see the relevant BKDG for more info.
2866  *
2867  * The memory controller provides for a total of only 8 CSROWs in its current
2868  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2869  * single channel mode or two (2) DIMMs in dual channel mode.
2870  *
2871  * The following code logic collapses the various tables for CSROW based on CPU
2872  * revision.
2873  *
2874  * Returns:
2875  *	The number of PAGE_SIZE pages that the specified CSROW number
2876  *	encompasses.
2877  *
2878  */
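/*
 * Worked example with hypothetical values: if dbam_to_cs() reports a 2048 MB
 * chip select, the conversion below is nr_pages = 2048 << (20 - PAGE_SHIFT);
 * with 4 kB pages (PAGE_SHIFT = 12) that is 2048 << 8 = 524288 pages.
 */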
2879 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2880 {
2881 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2882 	int csrow_nr = csrow_nr_orig;
2883 	u32 cs_mode, nr_pages;
2884 
2885 	if (!pvt->umc) {
2886 		csrow_nr >>= 1;
2887 		cs_mode = DBAM_DIMM(csrow_nr, dbam);
2888 	} else {
2889 		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2890 	}
2891 
2892 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2893 	nr_pages <<= 20 - PAGE_SHIFT;
2894 
2895 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2896 		    csrow_nr_orig, dct,  cs_mode);
2897 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2898 
2899 	return nr_pages;
2900 }
2901 
2902 static int init_csrows_df(struct mem_ctl_info *mci)
2903 {
2904 	struct amd64_pvt *pvt = mci->pvt_info;
2905 	enum edac_type edac_mode = EDAC_NONE;
2906 	enum dev_type dev_type = DEV_UNKNOWN;
2907 	struct dimm_info *dimm;
2908 	int empty = 1;
2909 	u8 umc, cs;
2910 
2911 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2912 		edac_mode = EDAC_S16ECD16ED;
2913 		dev_type = DEV_X16;
2914 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2915 		edac_mode = EDAC_S8ECD8ED;
2916 		dev_type = DEV_X8;
2917 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2918 		edac_mode = EDAC_S4ECD4ED;
2919 		dev_type = DEV_X4;
2920 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2921 		edac_mode = EDAC_SECDED;
2922 	}
2923 
2924 	for_each_umc(umc) {
2925 		for_each_chip_select(cs, umc, pvt) {
2926 			if (!csrow_enabled(cs, umc, pvt))
2927 				continue;
2928 
2929 			empty = 0;
2930 			dimm = mci->csrows[cs]->channels[umc]->dimm;
2931 
2932 			edac_dbg(1, "MC node: %d, csrow: %d\n",
2933 					pvt->mc_node_id, cs);
2934 
2935 			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2936 			dimm->mtype = pvt->dram_type;
2937 			dimm->edac_mode = edac_mode;
2938 			dimm->dtype = dev_type;
2939 		}
2940 	}
2941 
2942 	return empty;
2943 }
2944 
2945 /*
2946  * Initialize the array of csrow attribute instances, based on the values
2947  * from pci config hardware registers.
2948  */
2949 static int init_csrows(struct mem_ctl_info *mci)
2950 {
2951 	struct amd64_pvt *pvt = mci->pvt_info;
2952 	enum edac_type edac_mode = EDAC_NONE;
2953 	struct csrow_info *csrow;
2954 	struct dimm_info *dimm;
2955 	int i, j, empty = 1;
2956 	int nr_pages = 0;
2957 	u32 val;
2958 
2959 	if (pvt->umc)
2960 		return init_csrows_df(mci);
2961 
2962 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2963 
2964 	pvt->nbcfg = val;
2965 
2966 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2967 		 pvt->mc_node_id, val,
2968 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2969 
2970 	/*
2971 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2972 	 */
2973 	for_each_chip_select(i, 0, pvt) {
2974 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2975 		bool row_dct1 = false;
2976 
2977 		if (pvt->fam != 0xf)
2978 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2979 
2980 		if (!row_dct0 && !row_dct1)
2981 			continue;
2982 
2983 		csrow = mci->csrows[i];
2984 		empty = 0;
2985 
2986 		edac_dbg(1, "MC node: %d, csrow: %d\n",
2987 			    pvt->mc_node_id, i);
2988 
2989 		if (row_dct0) {
2990 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
2991 			csrow->channels[0]->dimm->nr_pages = nr_pages;
2992 		}
2993 
2994 		/* K8 has only one DCT */
2995 		if (pvt->fam != 0xf && row_dct1) {
2996 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2997 
2998 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2999 			nr_pages += row_dct1_pages;
3000 		}
3001 
3002 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3003 
3004 		/* Determine DIMM ECC mode: */
3005 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3006 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3007 					? EDAC_S4ECD4ED
3008 					: EDAC_SECDED;
3009 		}
3010 
3011 		for (j = 0; j < pvt->channel_count; j++) {
3012 			dimm = csrow->channels[j]->dimm;
3013 			dimm->mtype = pvt->dram_type;
3014 			dimm->edac_mode = edac_mode;
3015 		}
3016 	}
3017 
3018 	return empty;
3019 }
3020 
3021 /* get all cores on this DCT */
3022 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3023 {
3024 	int cpu;
3025 
3026 	for_each_online_cpu(cpu)
3027 		if (amd_get_nb_id(cpu) == nid)
3028 			cpumask_set_cpu(cpu, mask);
3029 }
3030 
3031 /* check MCG_CTL on all the cpus on this node */
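/*
 * The loop below reads MSR_IA32_MCG_CTL on every core of the node and
 * requires the NB MCE bank enable bit (MSR_MCGCTL_NBE, bit 4, as the
 * "set MSR ...[4]" hint in ecc_enabled() also suggests) to be set on all of
 * them before reporting the bank as enabled.
 */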
3032 static bool nb_mce_bank_enabled_on_node(u16 nid)
3033 {
3034 	cpumask_var_t mask;
3035 	int cpu, nbe;
3036 	bool ret = false;
3037 
3038 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3039 		amd64_warn("%s: Error allocating mask\n", __func__);
3040 		return false;
3041 	}
3042 
3043 	get_cpus_on_this_dct_cpumask(mask, nid);
3044 
3045 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3046 
3047 	for_each_cpu(cpu, mask) {
3048 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3049 		nbe = reg->l & MSR_MCGCTL_NBE;
3050 
3051 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3052 			 cpu, reg->q,
3053 			 (nbe ? "enabled" : "disabled"));
3054 
3055 		if (!nbe)
3056 			goto out;
3057 	}
3058 	ret = true;
3059 
3060 out:
3061 	free_cpumask_var(mask);
3062 	return ret;
3063 }
3064 
3065 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3066 {
3067 	cpumask_var_t cmask;
3068 	int cpu;
3069 
3070 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3071 		amd64_warn("%s: error allocating mask\n", __func__);
3072 		return -ENOMEM;
3073 	}
3074 
3075 	get_cpus_on_this_dct_cpumask(cmask, nid);
3076 
3077 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3078 
3079 	for_each_cpu(cpu, cmask) {
3080 
3081 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3082 
3083 		if (on) {
3084 			if (reg->l & MSR_MCGCTL_NBE)
3085 				s->flags.nb_mce_enable = 1;
3086 
3087 			reg->l |= MSR_MCGCTL_NBE;
3088 		} else {
3089 			/*
3090 			 * Turn off NB MCE reporting only when it was off before
3091 			 */
3092 			if (!s->flags.nb_mce_enable)
3093 				reg->l &= ~MSR_MCGCTL_NBE;
3094 		}
3095 	}
3096 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3097 
3098 	free_cpumask_var(cmask);
3099 
3100 	return 0;
3101 }
3102 
3103 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3104 				       struct pci_dev *F3)
3105 {
3106 	bool ret = true;
3107 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3108 
3109 	if (toggle_ecc_err_reporting(s, nid, ON)) {
3110 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3111 		return false;
3112 	}
3113 
3114 	amd64_read_pci_cfg(F3, NBCTL, &value);
3115 
3116 	s->old_nbctl   = value & mask;
3117 	s->nbctl_valid = true;
3118 
3119 	value |= mask;
3120 	amd64_write_pci_cfg(F3, NBCTL, value);
3121 
3122 	amd64_read_pci_cfg(F3, NBCFG, &value);
3123 
3124 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3125 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3126 
3127 	if (!(value & NBCFG_ECC_ENABLE)) {
3128 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3129 
3130 		s->flags.nb_ecc_prev = 0;
3131 
3132 		/* Attempt to turn on DRAM ECC Enable */
3133 		value |= NBCFG_ECC_ENABLE;
3134 		amd64_write_pci_cfg(F3, NBCFG, value);
3135 
3136 		amd64_read_pci_cfg(F3, NBCFG, &value);
3137 
3138 		if (!(value & NBCFG_ECC_ENABLE)) {
3139 			amd64_warn("Hardware rejected DRAM ECC enable, "
3140 				   "check memory DIMM configuration.\n");
3141 			ret = false;
3142 		} else {
3143 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3144 		}
3145 	} else {
3146 		s->flags.nb_ecc_prev = 1;
3147 	}
3148 
3149 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3150 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3151 
3152 	return ret;
3153 }
3154 
3155 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3156 					struct pci_dev *F3)
3157 {
3158 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3159 
3160 	if (!s->nbctl_valid)
3161 		return;
3162 
3163 	amd64_read_pci_cfg(F3, NBCTL, &value);
3164 	value &= ~mask;
3165 	value |= s->old_nbctl;
3166 
3167 	amd64_write_pci_cfg(F3, NBCTL, value);
3168 
3169 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3170 	if (!s->flags.nb_ecc_prev) {
3171 		amd64_read_pci_cfg(F3, NBCFG, &value);
3172 		value &= ~NBCFG_ECC_ENABLE;
3173 		amd64_write_pci_cfg(F3, NBCFG, value);
3174 	}
3175 
3176 	/* restore the NB Enable MCGCTL bit */
3177 	if (toggle_ecc_err_reporting(s, nid, OFF))
3178 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3179 }
3180 
3181 /*
3182  * EDAC requires that the BIOS have ECC enabled before
3183  * taking over the processing of ECC errors. A command line
3184  * option allows force-enabling hardware ECC later in
3185  * enable_ecc_error_reporting().
3186  */
3187 static const char *ecc_msg =
3188 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
3189 	" Either enable ECC checking or force module loading by setting "
3190 	"'ecc_enable_override'.\n"
3191 	" (Note that use of the override may cause unknown side effects.)\n";
3192 
3193 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
3194 {
3195 	bool nb_mce_en = false;
3196 	u8 ecc_en = 0, i;
3197 	u32 value;
3198 
3199 	if (boot_cpu_data.x86 >= 0x17) {
3200 		u8 umc_en_mask = 0, ecc_en_mask = 0;
3201 
3202 		for_each_umc(i) {
3203 			u32 base = get_umc_base(i);
3204 
3205 			/* Only check enabled UMCs. */
3206 			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
3207 				continue;
3208 
3209 			if (!(value & UMC_SDP_INIT))
3210 				continue;
3211 
3212 			umc_en_mask |= BIT(i);
3213 
3214 			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
3215 				continue;
3216 
3217 			if (value & UMC_ECC_ENABLED)
3218 				ecc_en_mask |= BIT(i);
3219 		}
3220 
3221 		/* Check whether at least one UMC is enabled: */
3222 		if (umc_en_mask)
3223 			ecc_en = umc_en_mask == ecc_en_mask;
3224 		else
3225 			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3226 
3227 		/* Assume UMC MCA banks are enabled. */
3228 		nb_mce_en = true;
3229 	} else {
3230 		amd64_read_pci_cfg(F3, NBCFG, &value);
3231 
3232 		ecc_en = !!(value & NBCFG_ECC_ENABLE);
3233 
3234 		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3235 		if (!nb_mce_en)
3236 			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3237 				     MSR_IA32_MCG_CTL, nid);
3238 	}
3239 
3240 	amd64_info("Node %d: DRAM ECC %s.\n",
3241 		   nid, (ecc_en ? "enabled" : "disabled"));
3242 
3243 	if (!ecc_en || !nb_mce_en) {
3244 		amd64_info("%s", ecc_msg);
3245 		return false;
3246 	}
3247 	return true;
3248 }
3249 
3250 static inline void
3251 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3252 {
3253 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3254 
3255 	for_each_umc(i) {
3256 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3257 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3258 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3259 
3260 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3261 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3262 		}
3263 	}
3264 
3265 	/* Set chipkill only if ECC is enabled: */
3266 	if (ecc_en) {
3267 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3268 
3269 		if (!cpk_en)
3270 			return;
3271 
3272 		if (dev_x4)
3273 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3274 		else if (dev_x16)
3275 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3276 		else
3277 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3278 	}
3279 }
3280 
3281 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
3282 				 struct amd64_family_type *fam)
3283 {
3284 	struct amd64_pvt *pvt = mci->pvt_info;
3285 
3286 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3287 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3288 
3289 	if (pvt->umc) {
3290 		f17h_determine_edac_ctl_cap(mci, pvt);
3291 	} else {
3292 		if (pvt->nbcap & NBCAP_SECDED)
3293 			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3294 
3295 		if (pvt->nbcap & NBCAP_CHIPKILL)
3296 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3297 	}
3298 
3299 	mci->edac_cap		= determine_edac_cap(pvt);
3300 	mci->mod_name		= EDAC_MOD_STR;
3301 	mci->ctl_name		= fam->ctl_name;
3302 	mci->dev_name		= pci_name(pvt->F3);
3303 	mci->ctl_page_to_phys	= NULL;
3304 
3305 	/* memory scrubber interface */
3306 	mci->set_sdram_scrub_rate = set_scrub_rate;
3307 	mci->get_sdram_scrub_rate = get_scrub_rate;
3308 }
3309 
3310 /*
3311  * returns a pointer to the family descriptor on success, NULL otherwise.
3312  */
3313 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3314 {
3315 	struct amd64_family_type *fam_type = NULL;
3316 
3317 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3318 	pvt->stepping	= boot_cpu_data.x86_stepping;
3319 	pvt->model	= boot_cpu_data.x86_model;
3320 	pvt->fam	= boot_cpu_data.x86;
3321 
3322 	switch (pvt->fam) {
3323 	case 0xf:
3324 		fam_type	= &family_types[K8_CPUS];
3325 		pvt->ops	= &family_types[K8_CPUS].ops;
3326 		break;
3327 
3328 	case 0x10:
3329 		fam_type	= &family_types[F10_CPUS];
3330 		pvt->ops	= &family_types[F10_CPUS].ops;
3331 		break;
3332 
3333 	case 0x15:
3334 		if (pvt->model == 0x30) {
3335 			fam_type = &family_types[F15_M30H_CPUS];
3336 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
3337 			break;
3338 		} else if (pvt->model == 0x60) {
3339 			fam_type = &family_types[F15_M60H_CPUS];
3340 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
3341 			break;
3342 		}
3343 
3344 		fam_type	= &family_types[F15_CPUS];
3345 		pvt->ops	= &family_types[F15_CPUS].ops;
3346 		break;
3347 
3348 	case 0x16:
3349 		if (pvt->model == 0x30) {
3350 			fam_type = &family_types[F16_M30H_CPUS];
3351 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
3352 			break;
3353 		}
3354 		fam_type	= &family_types[F16_CPUS];
3355 		pvt->ops	= &family_types[F16_CPUS].ops;
3356 		break;
3357 
3358 	case 0x17:
3359 		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3360 			fam_type = &family_types[F17_M10H_CPUS];
3361 			pvt->ops = &family_types[F17_M10H_CPUS].ops;
3362 			break;
3363 		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3364 			fam_type = &family_types[F17_M30H_CPUS];
3365 			pvt->ops = &family_types[F17_M30H_CPUS].ops;
3366 			break;
3367 		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3368 			fam_type = &family_types[F17_M70H_CPUS];
3369 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
3370 			break;
3371 		}
3372 		/* fall through */
3373 	case 0x18:
3374 		fam_type	= &family_types[F17_CPUS];
3375 		pvt->ops	= &family_types[F17_CPUS].ops;
3376 
3377 		if (pvt->fam == 0x18)
3378 			family_types[F17_CPUS].ctl_name = "F18h";
3379 		break;
3380 
3381 	default:
3382 		amd64_err("Unsupported family!\n");
3383 		return NULL;
3384 	}
3385 
3386 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3387 		     (pvt->fam == 0xf ?
3388 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
3389 							     : "revE or earlier ")
3390 				 : ""), pvt->mc_node_id);
3391 	return fam_type;
3392 }
3393 
3394 static const struct attribute_group *amd64_edac_attr_groups[] = {
3395 #ifdef CONFIG_EDAC_DEBUG
3396 	&amd64_edac_dbg_group,
3397 #endif
3398 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3399 	&amd64_edac_inj_group,
3400 #endif
3401 	NULL
3402 };
3403 
3404 /* Set the number of Unified Memory Controllers in the system. */
3405 static void compute_num_umcs(void)
3406 {
3407 	u8 model = boot_cpu_data.x86_model;
3408 
3409 	if (boot_cpu_data.x86 < 0x17)
3410 		return;
3411 
3412 	if (model >= 0x30 && model <= 0x3f)
3413 		num_umcs = 8;
3414 	else
3415 		num_umcs = 2;
3416 
3417 	edac_dbg(1, "Number of UMCs: %x\n", num_umcs);
3418 }
3419 
3420 static int init_one_instance(unsigned int nid)
3421 {
3422 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3423 	struct amd64_family_type *fam_type = NULL;
3424 	struct mem_ctl_info *mci = NULL;
3425 	struct edac_mc_layer layers[2];
3426 	struct amd64_pvt *pvt = NULL;
3427 	u16 pci_id1, pci_id2;
3428 	int err = 0, ret;
3429 
3430 	ret = -ENOMEM;
3431 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3432 	if (!pvt)
3433 		goto err_ret;
3434 
3435 	pvt->mc_node_id	= nid;
3436 	pvt->F3 = F3;
3437 
3438 	ret = -EINVAL;
3439 	fam_type = per_family_init(pvt);
3440 	if (!fam_type)
3441 		goto err_free;
3442 
3443 	if (pvt->fam >= 0x17) {
3444 		pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
3445 		if (!pvt->umc) {
3446 			ret = -ENOMEM;
3447 			goto err_free;
3448 		}
3449 
3450 		pci_id1 = fam_type->f0_id;
3451 		pci_id2 = fam_type->f6_id;
3452 	} else {
3453 		pci_id1 = fam_type->f1_id;
3454 		pci_id2 = fam_type->f2_id;
3455 	}
3456 
3457 	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3458 	if (err)
3459 		goto err_post_init;
3460 
3461 	read_mc_regs(pvt);
3462 
3463 	/*
3464 	 * We need to determine how many memory channels there are. Then use
3465 	 * that information for calculating the size of the dynamic instance
3466 	 * tables in the 'mci' structure.
3467 	 */
3468 	ret = -EINVAL;
3469 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
3470 	if (pvt->channel_count < 0)
3471 		goto err_siblings;
3472 
3473 	ret = -ENOMEM;
3474 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3475 	layers[0].size = pvt->csels[0].b_cnt;
3476 	layers[0].is_virt_csrow = true;
3477 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
3478 
3479 	/*
3480 	 * Always allocate two channels since we can have setups with DIMMs on
3481 	 * only one channel. Also, this simplifies handling later for the price
3482 	 * of a couple of KBs tops.
3483 	 *
3484 	 * On Fam17h+, the number of controllers may be greater than two. So set
3485 	 * the size equal to the maximum number of UMCs.
3486 	 */
3487 	if (pvt->fam >= 0x17)
3488 		layers[1].size = num_umcs;
3489 	else
3490 		layers[1].size = 2;
3491 	layers[1].is_virt_csrow = false;
3492 
3493 	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
3494 	if (!mci)
3495 		goto err_siblings;
3496 
3497 	mci->pvt_info = pvt;
3498 	mci->pdev = &pvt->F3->dev;
3499 
3500 	setup_mci_misc_attrs(mci, fam_type);
3501 
3502 	if (init_csrows(mci))
3503 		mci->edac_cap = EDAC_FLAG_NONE;
3504 
3505 	ret = -ENODEV;
3506 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3507 		edac_dbg(1, "failed edac_mc_add_mc()\n");
3508 		goto err_add_mc;
3509 	}
3510 
3511 	return 0;
3512 
3513 err_add_mc:
3514 	edac_mc_free(mci);
3515 
3516 err_siblings:
3517 	free_mc_sibling_devs(pvt);
3518 
3519 err_post_init:
3520 	if (pvt->fam >= 0x17)
3521 		kfree(pvt->umc);
3522 
3523 err_free:
3524 	kfree(pvt);
3525 
3526 err_ret:
3527 	return ret;
3528 }
3529 
3530 static int probe_one_instance(unsigned int nid)
3531 {
3532 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3533 	struct ecc_settings *s;
3534 	int ret;
3535 
3536 	ret = -ENOMEM;
3537 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3538 	if (!s)
3539 		goto err_out;
3540 
3541 	ecc_stngs[nid] = s;
3542 
3543 	if (!ecc_enabled(F3, nid)) {
3544 		ret = 0;
3545 
3546 		if (!ecc_enable_override)
3547 			goto err_enable;
3548 
3549 		if (boot_cpu_data.x86 >= 0x17) {
3550 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
3551 			goto err_enable;
3552 		} else
3553 			amd64_warn("Forcing ECC on!\n");
3554 
3555 		if (!enable_ecc_error_reporting(s, nid, F3))
3556 			goto err_enable;
3557 	}
3558 
3559 	ret = init_one_instance(nid);
3560 	if (ret < 0) {
3561 		amd64_err("Error probing instance: %d\n", nid);
3562 
3563 		if (boot_cpu_data.x86 < 0x17)
3564 			restore_ecc_error_reporting(s, nid, F3);
3565 
3566 		goto err_enable;
3567 	}
3568 
3569 	return ret;
3570 
3571 err_enable:
3572 	kfree(s);
3573 	ecc_stngs[nid] = NULL;
3574 
3575 err_out:
3576 	return ret;
3577 }
3578 
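/* Tear down the EDAC MC instance for a node and release its resources. */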
static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	mci = find_mci_by_dev(&F3->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}

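/*
 * Register a single EDAC PCI control device for the platform, backed by the
 * first MC instance that was set up.
 */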
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = edac_mc_find(0);
	if (!mci)
		return;

	pvt = mci->pvt_info;
	if (pvt->umc)
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
	else
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}

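/* CPU families handled by this driver; also used for module autoloading. */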
static const struct x86_cpu_id amd64_cpuids[] = {
	{ X86_VENDOR_AMD, 0xF,	X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

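/*
 * Module init: verify EDAC ownership and CPU support, probe every
 * northbridge node, then register the MCE decoder and the EDAC PCI
 * control device.
 */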
static int __init amd64_edac_init(void)
{
	const char *owner;
	int err = -ENODEV;
	int i;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	compute_num_umcs();

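	/* Set up one MC instance per northbridge/node. */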
	for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}

	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	if (boot_cpu_data.x86 >= 0x17)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}

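/* Module teardown: undo everything amd64_edac_init() set up, in reverse order. */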
static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);

	if (boot_cpu_data.x86 >= 0x17)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
		"Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");