1  // SPDX-License-Identifier: GPL-2.0-only
2  /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
3   *
4   * This driver supports the memory controllers found on the Intel
5   * processor family Sandy Bridge.
6   *
7   * Copyright (c) 2011 by:
8   *	 Mauro Carvalho Chehab
9   */
10  
11  #include <linux/module.h>
12  #include <linux/init.h>
13  #include <linux/pci.h>
14  #include <linux/pci_ids.h>
15  #include <linux/slab.h>
16  #include <linux/delay.h>
17  #include <linux/edac.h>
18  #include <linux/mmzone.h>
19  #include <linux/smp.h>
20  #include <linux/bitmap.h>
21  #include <linux/math64.h>
22  #include <linux/mod_devicetable.h>
23  #include <asm/cpu_device_id.h>
24  #include <asm/intel-family.h>
25  #include <asm/processor.h>
26  #include <asm/mce.h>
27  
28  #include "edac_module.h"
29  
30  /* Static vars */
31  static LIST_HEAD(sbridge_edac_list);
32  
33  /*
34   * Alter this version for the module when modifications are made
35   */
36  #define SBRIDGE_REVISION    " Ver: 1.1.2 "
37  #define EDAC_MOD_STR	    "sb_edac"
38  
39  /*
40   * Debug macros
41   */
42  #define sbridge_printk(level, fmt, arg...)			\
43  	edac_printk(level, "sbridge", fmt, ##arg)
44  
45  #define sbridge_mc_printk(mci, level, fmt, arg...)		\
46  	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
47  
48  /*
49   * Get a bit field at register value <v>, from bit <lo> to bit <hi>
50   */
51  #define GET_BITFIELD(v, lo, hi)	\
52  	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
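/*
 * Example: GET_BITFIELD(0x1234, 4, 7) masks bits 7..4 (giving 0x30) and
 * shifts them down, yielding 0x3.
 */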
53  
54  /* Devices 12 Function 6, Offsets 0x80 to 0xcc */
55  static const u32 sbridge_dram_rule[] = {
56  	0x80, 0x88, 0x90, 0x98, 0xa0,
57  	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
58  };
59  
60  static const u32 ibridge_dram_rule[] = {
61  	0x60, 0x68, 0x70, 0x78, 0x80,
62  	0x88, 0x90, 0x98, 0xa0,	0xa8,
63  	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
64  	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
65  };
66  
67  static const u32 knl_dram_rule[] = {
68  	0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
69  	0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
70  	0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
71  	0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
72  	0x100, 0x108, 0x110, 0x118,   /* 20-23 */
73  };
74  
75  #define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0,  0)
76  #define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)
77  
78  static char *show_dram_attr(u32 attr)
79  {
80  	switch (attr) {
81  		case 0:
82  			return "DRAM";
83  		case 1:
84  			return "MMCFG";
85  		case 2:
86  			return "NXM";
87  		default:
88  			return "unknown";
89  	}
90  }
91  
92  static const u32 sbridge_interleave_list[] = {
93  	0x84, 0x8c, 0x94, 0x9c, 0xa4,
94  	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
95  };
96  
97  static const u32 ibridge_interleave_list[] = {
98  	0x64, 0x6c, 0x74, 0x7c, 0x84,
99  	0x8c, 0x94, 0x9c, 0xa4, 0xac,
100  	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
101  	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
102  };
103  
104  static const u32 knl_interleave_list[] = {
105  	0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
106  	0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
107  	0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
108  	0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
109  	0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
110  };
111  #define MAX_INTERLEAVE							\
112  	(max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list),	\
113  	       max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list),	\
114  		     ARRAY_SIZE(knl_interleave_list))))
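/*
 * With the tables above, MAX_INTERLEAVE evaluates to
 * ARRAY_SIZE(knl_interleave_list) == 24, the largest of the three lists.
 */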
115  
116  struct interleave_pkg {
117  	unsigned char start;
118  	unsigned char end;
119  };
120  
121  static const struct interleave_pkg sbridge_interleave_pkg[] = {
122  	{ 0, 2 },
123  	{ 3, 5 },
124  	{ 8, 10 },
125  	{ 11, 13 },
126  	{ 16, 18 },
127  	{ 19, 21 },
128  	{ 24, 26 },
129  	{ 27, 29 },
130  };
131  
132  static const struct interleave_pkg ibridge_interleave_pkg[] = {
133  	{ 0, 3 },
134  	{ 4, 7 },
135  	{ 8, 11 },
136  	{ 12, 15 },
137  	{ 16, 19 },
138  	{ 20, 23 },
139  	{ 24, 27 },
140  	{ 28, 31 },
141  };
142  
143  static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
144  			  int interleave)
145  {
146  	return GET_BITFIELD(reg, table[interleave].start,
147  			    table[interleave].end);
148  }
149  
150  /* Devices 12 Function 7 */
151  
152  #define TOLM		0x80
153  #define TOHM		0x84
154  #define HASWELL_TOLM	0xd0
155  #define HASWELL_TOHM_0	0xd4
156  #define HASWELL_TOHM_1	0xd8
157  #define KNL_TOLM	0xd0
158  #define KNL_TOHM_0	0xd4
159  #define KNL_TOHM_1	0xd8
160  
161  #define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
162  #define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
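/*
 * Example: a TOLM register whose bits 3:0 read 0x4 decodes as
 * GET_TOLM(reg) = (0x4 << 28) | 0x3ffffff = 0x43ffffff, i.e. the inclusive
 * top-of-low-memory limit with the untranslated low bits filled with 1s.
 */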
163  
164  /* Device 13 Function 6 */
165  
166  #define SAD_TARGET	0xf0
167  
168  #define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)
169  
170  #define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)
171  
172  #define SAD_CONTROL	0xf4
173  
174  /* Device 14 function 0 */
175  
176  static const u32 tad_dram_rule[] = {
177  	0x40, 0x44, 0x48, 0x4c,
178  	0x50, 0x54, 0x58, 0x5c,
179  	0x60, 0x64, 0x68, 0x6c,
180  };
181  #define MAX_TAD	ARRAY_SIZE(tad_dram_rule)
182  
183  #define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
184  #define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
185  #define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
186  #define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
187  #define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
188  #define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
189  #define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)
190  
191  /* Device 15, function 0 */
192  
193  #define MCMTR			0x7c
194  #define KNL_MCMTR		0x624
195  
196  #define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
197  #define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
198  #define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)
199  
200  /* Device 15, function 1 */
201  
202  #define RASENABLES		0xac
203  #define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)
204  
205  /* Device 15, functions 2-5 */
206  
207  static const int mtr_regs[] = {
208  	0x80, 0x84, 0x88,
209  };
210  
211  static const int knl_mtr_reg = 0xb60;
212  
213  #define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
214  #define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
215  #define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
216  #define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
217  #define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)
218  
219  static const u32 tad_ch_nilv_offset[] = {
220  	0x90, 0x94, 0x98, 0x9c,
221  	0xa0, 0xa4, 0xa8, 0xac,
222  	0xb0, 0xb4, 0xb8, 0xbc,
223  };
224  #define CHN_IDX_OFFSET(reg)		GET_BITFIELD(reg, 28, 29)
225  #define TAD_OFFSET(reg)			(GET_BITFIELD(reg,  6, 25) << 26)
226  
227  static const u32 rir_way_limit[] = {
228  	0x108, 0x10c, 0x110, 0x114, 0x118,
229  };
230  #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
231  
232  #define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
233  #define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)
234  
235  #define MAX_RIR_WAY	8
236  
237  static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
238  	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
239  	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
240  	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
241  	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
242  	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
243  };
244  
245  #define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
246  	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
247  
248  #define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
249  	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
250  
251  /* Device 16, functions 2-7 */
252  
253  /*
254   * FIXME: Implement the error count reads directly
255   */
256  
257  #define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
258  #define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
259  #define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
260  #define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)
261  
262  #if 0 /* Currently unused */
263  static const u32 correrrcnt[] = {
264  	0x104, 0x108, 0x10c, 0x110,
265  };
266  
267  static const u32 correrrthrsld[] = {
268  	0x11c, 0x120, 0x124, 0x128,
269  };
270  #endif
271  
272  #define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
273  #define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)
274  
275  
276  /* Device 17, function 0 */
277  
278  #define SB_RANK_CFG_A		0x0328
279  
280  #define IB_RANK_CFG_A		0x0320
281  
282  /*
283   * sbridge structs
284   */
285  
286  #define NUM_CHANNELS		6	/* Max channels per MC */
287  #define MAX_DIMMS		3	/* Max DIMMS per channel */
288  #define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
289  #define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
290  #define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
291  #define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */
292  
293  enum type {
294  	SANDY_BRIDGE,
295  	IVY_BRIDGE,
296  	HASWELL,
297  	BROADWELL,
298  	KNIGHTS_LANDING,
299  };
300  
301  enum domain {
302  	IMC0 = 0,
303  	IMC1,
304  	SOCK,
305  };
306  
307  enum mirroring_mode {
308  	NON_MIRRORING,
309  	ADDR_RANGE_MIRRORING,
310  	FULL_MIRRORING,
311  };
312  
313  struct sbridge_pvt;
314  struct sbridge_info {
315  	enum type	type;
316  	u32		mcmtr;
317  	u32		rankcfgr;
318  	u64		(*get_tolm)(struct sbridge_pvt *pvt);
319  	u64		(*get_tohm)(struct sbridge_pvt *pvt);
320  	u64		(*rir_limit)(u32 reg);
321  	u64		(*sad_limit)(u32 reg);
322  	u32		(*interleave_mode)(u32 reg);
323  	u32		(*dram_attr)(u32 reg);
324  	const u32	*dram_rule;
325  	const u32	*interleave_list;
326  	const struct interleave_pkg *interleave_pkg;
327  	u8		max_sad;
328  	u8		(*get_node_id)(struct sbridge_pvt *pvt);
329  	u8		(*get_ha)(u8 bank);
330  	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
331  	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
332  	struct pci_dev	*pci_vtd;
333  };
334  
335  struct sbridge_channel {
336  	u32		ranks;
337  	u32		dimms;
338  	struct dimm {
339  		u32 rowbits;
340  		u32 colbits;
341  		u32 bank_xor_enable;
342  		u32 amap_fine;
343  	} dimm[MAX_DIMMS];
344  };
345  
346  struct pci_id_descr {
347  	int			dev_id;
348  	int			optional;
349  	enum domain		dom;
350  };
351  
352  struct pci_id_table {
353  	const struct pci_id_descr	*descr;
354  	int				n_devs_per_imc;
355  	int				n_devs_per_sock;
356  	int				n_imcs_per_sock;
357  	enum type			type;
358  };
359  
360  struct sbridge_dev {
361  	struct list_head	list;
362  	int			seg;
363  	u8			bus, mc;
364  	u8			node_id, source_id;
365  	struct pci_dev		**pdev;
366  	enum domain		dom;
367  	int			n_devs;
368  	int			i_devs;
369  	struct mem_ctl_info	*mci;
370  };
371  
372  struct knl_pvt {
373  	struct pci_dev          *pci_cha[KNL_MAX_CHAS];
374  	struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
375  	struct pci_dev          *pci_mc0;
376  	struct pci_dev          *pci_mc1;
377  	struct pci_dev          *pci_mc0_misc;
378  	struct pci_dev          *pci_mc1_misc;
379  	struct pci_dev          *pci_mc_info; /* tolm, tohm */
380  };
381  
382  struct sbridge_pvt {
383  	/* Devices per socket */
384  	struct pci_dev		*pci_ddrio;
385  	struct pci_dev		*pci_sad0, *pci_sad1;
386  	struct pci_dev		*pci_br0, *pci_br1;
387  	/* Devices per memory controller */
388  	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
389  	struct pci_dev		*pci_tad[NUM_CHANNELS];
390  
391  	struct sbridge_dev	*sbridge_dev;
392  
393  	struct sbridge_info	info;
394  	struct sbridge_channel	channel[NUM_CHANNELS];
395  
396  	/* Memory type detection */
397  	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
398  	bool			is_chan_hash;
399  	enum mirroring_mode	mirror_mode;
400  
401  	/* Memory description */
402  	u64			tolm, tohm;
403  	struct knl_pvt knl;
404  };
405  
406  #define PCI_DESCR(device_id, opt, domain)	\
407  	.dev_id = (device_id),		\
408  	.optional = opt,	\
409  	.dom = domain
410  
411  static const struct pci_id_descr pci_dev_descr_sbridge[] = {
412  		/* Processor Home Agent */
413  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },
414  
415  		/* Memory controller */
416  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
417  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
418  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
419  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
420  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
421  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
422  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
423  
424  		/* System Address Decoder */
425  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
426  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },
427  
428  		/* Broadcast Registers */
429  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
430  };
431  
432  #define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
433  	.descr = A,			\
434  	.n_devs_per_imc = N,	\
435  	.n_devs_per_sock = ARRAY_SIZE(A),	\
436  	.n_imcs_per_sock = M,	\
437  	.type = T			\
438  }
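/*
 * Example: the Sandy Bridge table below passes the whole descriptor array
 * as a single IMC, so n_devs_per_imc == n_devs_per_sock == 11 and
 * n_imcs_per_sock == 1.
 */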
439  
440  static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
441  	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
442  	{0,}			/* 0 terminated list. */
443  };
444  
445  /* This changes depending on whether there is 1HA or 2HA:
446   * 1HA:
447   *	0x0eb8 (17.0) is DDRIO0
448   * 2HA:
449   *	0x0ebc (17.4) is DDRIO0
450   */
451  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
452  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc
453  
454  /* pci ids */
455  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
456  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
457  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
458  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
459  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
460  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
461  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
462  #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
463  #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
464  #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
465  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
466  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
467  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
468  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
469  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
470  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
471  #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d
472  
473  static const struct pci_id_descr pci_dev_descr_ibridge[] = {
474  		/* Processor Home Agent */
475  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
476  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
477  
478  		/* Memory controller */
479  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
480  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
481  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
482  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
483  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
484  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
485  
486  		/* Optional, mode 2HA */
487  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
488  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
489  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
490  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
491  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
492  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },
493  
494  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
495  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
496  
497  		/* System Address Decoder */
498  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },
499  
500  		/* Broadcast Registers */
501  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
502  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },
503  
504  };
505  
506  static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
507  	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
508  	{0,}			/* 0 terminated list. */
509  };
510  
511  /* Haswell support */
512  /* EN processor:
513   *	- 1 IMC
514   *	- 3 DDR3 channels, 2 DPC per channel
515   * EP processor:
516   *	- 1 or 2 IMC
517   *	- 4 DDR4 channels, 3 DPC per channel
518   * EP 4S processor:
519   *	- 2 IMC
520   *	- 4 DDR4 channels, 3 DPC per channel
521   * EX processor:
522   *	- 2 IMC
523   *	- each IMC interfaces with an SMI 2 channel
524   *	- each SMI channel interfaces with a scalable memory buffer
525   *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
526   */
527  #define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
528  #define HASWELL_HASYSDEFEATURE2 0x84
529  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
530  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
531  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
532  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
533  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
534  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
535  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
536  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
537  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
538  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
539  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
540  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
541  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
542  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
543  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
544  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
545  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
546  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
547  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
548  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
549  #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
550  static const struct pci_id_descr pci_dev_descr_haswell[] = {
551  	/* first item must be the HA */
552  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
553  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },
554  
555  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
556  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
557  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
558  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
559  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
560  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
561  
562  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
563  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
564  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
565  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
566  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
567  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
568  
569  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
570  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
571  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
572  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
573  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
574  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
575  };
576  
577  static const struct pci_id_table pci_dev_descr_haswell_table[] = {
578  	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
579  	{0,}			/* 0 terminated list. */
580  };
581  
582  /* Knight's Landing Support */
583  /*
584   * KNL's memory channels are swizzled between memory controllers.
585   * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
586   */
587  #define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
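/*
 * Example: knl_channel_remap(0, 1) returns physical channel 4 (an MC0
 * channel), while knl_channel_remap(1, 1) returns channel 1 (an MC1 channel).
 */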
588  
589  /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
590  #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
591  /* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
592  #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
593  /* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
594  #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
595  /* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
596  #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
597  /* SAD target - 1-29-1 (1 of these) */
598  #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
599  /* Caching / Home Agent */
600  #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
601  /* Device with TOLM and TOHM, 0-5-0 (1 of these) */
602  #define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
603  
604  /*
605   * KNL differs from SB, IB, and Haswell in that it has multiple
606   * instances of the same device with the same device ID, so we handle that
607   * by creating as many copies in the table as we expect to find.
608   * (Devices with the same device ID must be grouped together.)
609   */
610  
611  static const struct pci_id_descr pci_dev_descr_knl[] = {
612  	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
613  	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
614  	[8]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
615  	[9]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
616  	[10]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
617  	[11]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
618  	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
619  };
620  
621  static const struct pci_id_table pci_dev_descr_knl_table[] = {
622  	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
623  	{0,}
624  };
625  
626  /*
627   * Broadwell support
628   *
629   * DE processor:
630   *	- 1 IMC
631   *	- 2 DDR3 channels, 2 DPC per channel
632   * EP processor:
633   *	- 1 or 2 IMC
634   *	- 4 DDR4 channels, 3 DPC per channel
635   * EP 4S processor:
636   *	- 2 IMC
637   *	- 4 DDR4 channels, 3 DPC per channel
638   * EX processor:
639   *	- 2 IMC
640   *	- each IMC interfaces with an SMI 2 channel
641   *	- each SMI channel interfaces with a scalable memory buffer
642   *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
643   */
644  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
645  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0	0x6fa0
646  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1	0x6f60
647  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
648  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
649  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
650  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
651  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
652  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
653  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
654  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
655  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
656  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
657  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
658  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
659  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
660  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
661  #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
662  
663  static const struct pci_id_descr pci_dev_descr_broadwell[] = {
664  	/* first item must be the HA */
665  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
666  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },
667  
668  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
669  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
670  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
671  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
672  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
673  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
674  
675  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
676  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
677  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
678  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
679  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
680  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
681  
682  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
683  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
684  	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
685  };
686  
687  static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
688  	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
689  	{0,}			/* 0 terminated list. */
690  };
691  
692  
693  /****************************************************************************
694  			Ancillary status routines
695   ****************************************************************************/
696  
697  static inline int numrank(enum type type, u32 mtr)
698  {
699  	int ranks = (1 << RANK_CNT_BITS(mtr));
700  	int max = 4;
701  
702  	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
703  		max = 8;
704  
705  	if (ranks > max) {
706  		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
707  			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
708  		return -EINVAL;
709  	}
710  
711  	return ranks;
712  }
713  
714  static inline int numrow(u32 mtr)
715  {
716  	int rows = (RANK_WIDTH_BITS(mtr) + 12);
717  
718  	if (rows < 13 || rows > 18) {
719  		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
720  			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
721  		return -EINVAL;
722  	}
723  
724  	return 1 << rows;
725  }
726  
727  static inline int numcol(u32 mtr)
728  {
729  	int cols = (COL_WIDTH_BITS(mtr) + 10);
730  
731  	if (cols > 12) {
732  		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
733  			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
734  		return -EINVAL;
735  	}
736  
737  	return 1 << cols;
738  }
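/*
 * Example: an MTR value with RANK_CNT_BITS = 2, RANK_WIDTH_BITS = 4 and
 * COL_WIDTH_BITS = 1 describes a 4-rank DIMM with 2^16 rows and 2^11
 * columns per bank.
 */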
739  
740  static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
741  					   int multi_bus,
742  					   struct sbridge_dev *prev)
743  {
744  	struct sbridge_dev *sbridge_dev;
745  
746  	/*
747  	 * If we have devices scattered across several busses that pertain
748  	 * to the same memory controller, we'll lump them all together.
749  	 */
750  	if (multi_bus) {
751  		return list_first_entry_or_null(&sbridge_edac_list,
752  				struct sbridge_dev, list);
753  	}
754  
755  	sbridge_dev = list_entry(prev ? prev->list.next
756  				      : sbridge_edac_list.next, struct sbridge_dev, list);
757  
758  	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
759  		if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
760  				(dom == SOCK || dom == sbridge_dev->dom))
761  			return sbridge_dev;
762  	}
763  
764  	return NULL;
765  }
766  
767  static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
768  					     const struct pci_id_table *table)
769  {
770  	struct sbridge_dev *sbridge_dev;
771  
772  	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
773  	if (!sbridge_dev)
774  		return NULL;
775  
776  	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
777  				    sizeof(*sbridge_dev->pdev),
778  				    GFP_KERNEL);
779  	if (!sbridge_dev->pdev) {
780  		kfree(sbridge_dev);
781  		return NULL;
782  	}
783  
784  	sbridge_dev->seg = seg;
785  	sbridge_dev->bus = bus;
786  	sbridge_dev->dom = dom;
787  	sbridge_dev->n_devs = table->n_devs_per_imc;
788  	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
789  
790  	return sbridge_dev;
791  }
792  
793  static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
794  {
795  	list_del(&sbridge_dev->list);
796  	kfree(sbridge_dev->pdev);
797  	kfree(sbridge_dev);
798  }
799  
800  static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
801  {
802  	u32 reg;
803  
804  	/* Address range is 32:28 */
805  	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
806  	return GET_TOLM(reg);
807  }
808  
809  static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
810  {
811  	u32 reg;
812  
813  	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
814  	return GET_TOHM(reg);
815  }
816  
817  static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
818  {
819  	u32 reg;
820  
821  	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
822  
823  	return GET_TOLM(reg);
824  }
825  
826  static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
827  {
828  	u32 reg;
829  
830  	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
831  
832  	return GET_TOHM(reg);
833  }
834  
835  static u64 rir_limit(u32 reg)
836  {
837  	return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
838  }
839  
840  static u64 sad_limit(u32 reg)
841  {
842  	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
843  }
844  
845  static u32 interleave_mode(u32 reg)
846  {
847  	return GET_BITFIELD(reg, 1, 1);
848  }
849  
850  static u32 dram_attr(u32 reg)
851  {
852  	return GET_BITFIELD(reg, 2, 3);
853  }
854  
855  static u64 knl_sad_limit(u32 reg)
856  {
857  	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
858  }
859  
860  static u32 knl_interleave_mode(u32 reg)
861  {
862  	return GET_BITFIELD(reg, 1, 2);
863  }
864  
865  static const char * const knl_intlv_mode[] = {
866  	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
867  };
868  
869  static const char *get_intlv_mode_str(u32 reg, enum type t)
870  {
871  	if (t == KNIGHTS_LANDING)
872  		return knl_intlv_mode[knl_interleave_mode(reg)];
873  	else
874  		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
875  }
876  
877  static u32 dram_attr_knl(u32 reg)
878  {
879  	return GET_BITFIELD(reg, 3, 4);
880  }
881  
882  
883  static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
884  {
885  	u32 reg;
886  	enum mem_type mtype;
887  
888  	if (pvt->pci_ddrio) {
889  		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
890  				      &reg);
891  		if (GET_BITFIELD(reg, 11, 11))
892  			/* FIXME: Can also be LRDIMM */
893  			mtype = MEM_RDDR3;
894  		else
895  			mtype = MEM_DDR3;
896  	} else
897  		mtype = MEM_UNKNOWN;
898  
899  	return mtype;
900  }
901  
902  static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
903  {
904  	u32 reg;
905  	bool registered = false;
906  	enum mem_type mtype = MEM_UNKNOWN;
907  
908  	if (!pvt->pci_ddrio)
909  		goto out;
910  
911  	pci_read_config_dword(pvt->pci_ddrio,
912  			      HASWELL_DDRCRCLKCONTROLS, &reg);
913  	/* Is_Rdimm */
914  	if (GET_BITFIELD(reg, 16, 16))
915  		registered = true;
916  
917  	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
918  	if (GET_BITFIELD(reg, 14, 14)) {
919  		if (registered)
920  			mtype = MEM_RDDR4;
921  		else
922  			mtype = MEM_DDR4;
923  	} else {
924  		if (registered)
925  			mtype = MEM_RDDR3;
926  		else
927  			mtype = MEM_DDR3;
928  	}
929  
930  out:
931  	return mtype;
932  }
933  
934  static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
935  {
936  	/* for KNL value is fixed */
937  	return DEV_X16;
938  }
939  
940  static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
941  {
942  	/* there's no way to figure out */
943  	return DEV_UNKNOWN;
944  }
945  
946  static enum dev_type __ibridge_get_width(u32 mtr)
947  {
948  	enum dev_type type = DEV_UNKNOWN;
949  
950  	switch (mtr) {
951  	case 2:
952  		type = DEV_X16;
953  		break;
954  	case 1:
955  		type = DEV_X8;
956  		break;
957  	case 0:
958  		type = DEV_X4;
959  		break;
960  	}
961  
962  	return type;
963  }
964  
965  static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
966  {
967  	/*
968  	 * ddr3_width in the documentation, but also valid for DDR4 on
969  	 * Haswell
970  	 */
971  	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
972  }
973  
974  static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
975  {
976  	/* ddr3_width in the documentation, but also valid for DDR4 */
977  	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
978  }
979  
980  static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
981  {
982  	/* DDR4 RDIMMS and LRDIMMS are supported */
983  	return MEM_RDDR4;
984  }
985  
986  static u8 get_node_id(struct sbridge_pvt *pvt)
987  {
988  	u32 reg;
989  	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
990  	return GET_BITFIELD(reg, 0, 2);
991  }
992  
993  static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
994  {
995  	u32 reg;
996  
997  	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
998  	return GET_BITFIELD(reg, 0, 3);
999  }
1000  
1001  static u8 knl_get_node_id(struct sbridge_pvt *pvt)
1002  {
1003  	u32 reg;
1004  
1005  	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1006  	return GET_BITFIELD(reg, 0, 2);
1007  }
1008  
1009  /*
1010   * Use the reporting bank number to determine which memory
1011   * controller (also known as "ha" for "home agent") the error came from. Sandy
1012   * Bridge only has one memory controller per socket, so the
1013   * answer is always zero.
1014   */
1015  static u8 sbridge_get_ha(u8 bank)
1016  {
1017  	return 0;
1018  }
1019  
1020  /*
1021   * On Ivy Bridge, Haswell and Broadwell the error may be in a
1022   * home agent bank (7, 8), or one of the per-channel memory
1023   * controller banks (9 .. 16).
1024   */
1025  static u8 ibridge_get_ha(u8 bank)
1026  {
1027  	switch (bank) {
1028  	case 7 ... 8:
1029  		return bank - 7;
1030  	case 9 ... 16:
1031  		return (bank - 9) / 4;
1032  	default:
1033  		return 0xff;
1034  	}
1035  }
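/*
 * Example: an error reported in bank 12 belongs to home agent
 * (12 - 9) / 4 = 0, while bank 15 belongs to home agent 1.
 */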
1036  
1037  /* Not used, but included for safety/symmetry */
1038  static u8 knl_get_ha(u8 bank)
1039  {
1040  	return 0xff;
1041  }
1042  
1043  static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1044  {
1045  	u32 reg;
1046  
1047  	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
1048  	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1049  }
1050  
1051  static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
1052  {
1053  	u64 rc;
1054  	u32 reg;
1055  
1056  	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
1057  	rc = GET_BITFIELD(reg, 26, 31);
1058  	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
1059  	rc = ((reg << 6) | rc) << 26;
1060  
1061  	return rc | 0x3ffffff;
1062  }
1063  
1064  static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1065  {
1066  	u32 reg;
1067  
1068  	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
1069  	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1070  }
1071  
1072  static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1073  {
1074  	u64 rc;
1075  	u32 reg_lo, reg_hi;
1076  
1077  	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
1078  	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
1079  	rc = ((u64)reg_hi << 32) | reg_lo;
1080  	return rc | 0x3ffffff;
1081  }
1082  
1083  
1084  static u64 haswell_rir_limit(u32 reg)
1085  {
1086  	return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
1087  }
1088  
1089  static inline u8 sad_pkg_socket(u8 pkg)
1090  {
1091  	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
1092  	return ((pkg >> 3) << 2) | (pkg & 0x3);
1093  }
1094  
1095  static inline u8 sad_pkg_ha(u8 pkg)
1096  {
1097  	return (pkg >> 2) & 0x1;
1098  }
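/*
 * Example: a nodeID of 0b1010 (S=1, A=0, SS=0b10) gives
 * sad_pkg_socket() = 0b110 and sad_pkg_ha() = 0.
 */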
1099  
1100  static int haswell_chan_hash(int idx, u64 addr)
1101  {
1102  	int i;
1103  
1104  	/*
1105  	 * XOR even bits from 12:26 to bit0 of idx,
1106  	 *     odd bits from 13:27 to bit1
1107  	 */
1108  	for (i = 12; i < 28; i += 2)
1109  		idx ^= (addr >> i) & 3;
1110  
1111  	return idx;
1112  }
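/*
 * Example: with address bits [13:12] = 0b01 and every higher even/odd
 * pair clear, an initial idx of 0 hashes to 1.
 */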
1113  
1114  /* Low bits of TAD limit, and some metadata. */
1115  static const u32 knl_tad_dram_limit_lo[] = {
1116  	0x400, 0x500, 0x600, 0x700,
1117  	0x800, 0x900, 0xa00, 0xb00,
1118  };
1119  
1120  /* Low bits of TAD offset. */
1121  static const u32 knl_tad_dram_offset_lo[] = {
1122  	0x404, 0x504, 0x604, 0x704,
1123  	0x804, 0x904, 0xa04, 0xb04,
1124  };
1125  
1126  /* High 16 bits of TAD limit and offset. */
1127  static const u32 knl_tad_dram_hi[] = {
1128  	0x408, 0x508, 0x608, 0x708,
1129  	0x808, 0x908, 0xa08, 0xb08,
1130  };
1131  
1132  /* Number of ways a tad entry is interleaved. */
1133  static const u32 knl_tad_ways[] = {
1134  	8, 6, 4, 3, 2, 1,
1135  };
1136  
1137  /*
1138   * Retrieve the n'th Target Address Decode table entry
1139   * from the memory controller's TAD table.
1140   *
1141   * @pvt:	driver private data
1142   * @entry:	which entry you want to retrieve
1143   * @mc:		which memory controller (0 or 1)
1144   * @offset:	output tad range offset
1145   * @limit:	output address of first byte above tad range
1146   * @ways:	output number of interleave ways
1147   *
1148   * The offset value has curious semantics.  It's a sort of running total
1149   * of the sizes of all the memory regions that aren't mapped in this
1150   * tad table.
1151   */
1152  static int knl_get_tad(const struct sbridge_pvt *pvt,
1153  		const int entry,
1154  		const int mc,
1155  		u64 *offset,
1156  		u64 *limit,
1157  		int *ways)
1158  {
1159  	u32 reg_limit_lo, reg_offset_lo, reg_hi;
1160  	struct pci_dev *pci_mc;
1161  	int way_id;
1162  
1163  	switch (mc) {
1164  	case 0:
1165  		pci_mc = pvt->knl.pci_mc0;
1166  		break;
1167  	case 1:
1168  		pci_mc = pvt->knl.pci_mc1;
1169  		break;
1170  	default:
1171  		WARN_ON(1);
1172  		return -EINVAL;
1173  	}
1174  
1175  	pci_read_config_dword(pci_mc,
1176  			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1177  	pci_read_config_dword(pci_mc,
1178  			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1179  	pci_read_config_dword(pci_mc,
1180  			knl_tad_dram_hi[entry], &reg_hi);
1181  
1182  	/* Is this TAD entry enabled? */
1183  	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1184  		return -ENODEV;
1185  
1186  	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1187  
1188  	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1189  		*ways = knl_tad_ways[way_id];
1190  	} else {
1191  		*ways = 0;
1192  		sbridge_printk(KERN_ERR,
1193  				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
1194  				way_id);
1195  		return -ENODEV;
1196  	}
1197  
1198  	/*
1199  	 * The least significant 6 bits of base and limit are truncated.
1200  	 * For limit, we fill the missing bits with 1s.
1201  	 */
1202  	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1203  				((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
1204  	*limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
1205  				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1206  
1207  	return 0;
1208  }
1209  
1210  /* Determine which memory controller is responsible for a given channel. */
1211  static int knl_channel_mc(int channel)
1212  {
1213  	WARN_ON(channel < 0 || channel >= 6);
1214  
1215  	return channel < 3 ? 1 : 0;
1216  }
1217  
1218  /*
1219   * Get the Nth entry from EDC_ROUTE_TABLE register.
1220   * (This is the per-tile mapping of logical interleave targets to
1221   *  physical EDC modules.)
1222   *
1223   * entry 0: 0:2
1224   *       1: 3:5
1225   *       2: 6:8
1226   *       3: 9:11
1227   *       4: 12:14
1228   *       5: 15:17
1229   *       6: 18:20
1230   *       7: 21:23
1231   * reserved: 24:31
1232   */
1233  static u32 knl_get_edc_route(int entry, u32 reg)
1234  {
1235  	WARN_ON(entry >= KNL_MAX_EDCS);
1236  	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1237  }
1238  
1239  /*
1240   * Get the Nth entry from MC_ROUTE_TABLE register.
1241   * (This is the per-tile mapping of logical interleave targets to
1242   *  physical DRAM channels.)
1243   *
1244   * entry 0: mc 0:2   channel 18:19
1245   *       1: mc 3:5   channel 20:21
1246   *       2: mc 6:8   channel 22:23
1247   *       3: mc 9:11  channel 24:25
1248   *       4: mc 12:14 channel 26:27
1249   *       5: mc 15:17 channel 28:29
1250   * reserved: 30:31
1251   *
1252   * Though we have 3 bits to identify the MC, we should only see
1253   * the values 0 or 1.
1254   */
1255  
1256  static u32 knl_get_mc_route(int entry, u32 reg)
1257  {
1258  	int mc, chan;
1259  
1260  	WARN_ON(entry >= KNL_MAX_CHANNELS);
1261  
1262  	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1263  	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1264  
1265  	return knl_channel_remap(mc, chan);
1266  }
1267  
1268  /*
1269   * Render the EDC_ROUTE register in human-readable form.
1270   * Output string s should be at least KNL_MAX_EDCS*2 bytes.
1271   */
1272  static void knl_show_edc_route(u32 reg, char *s)
1273  {
1274  	int i;
1275  
1276  	for (i = 0; i < KNL_MAX_EDCS; i++) {
1277  		s[i*2] = knl_get_edc_route(i, reg) + '0';
1278  		s[i*2+1] = '-';
1279  	}
1280  
1281  	s[KNL_MAX_EDCS*2 - 1] = '\0';
1282  }
1283  
1284  /*
1285   * Render the MC_ROUTE register in human-readable form.
1286   * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
1287   */
1288  static void knl_show_mc_route(u32 reg, char *s)
1289  {
1290  	int i;
1291  
1292  	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1293  		s[i*2] = knl_get_mc_route(i, reg) + '0';
1294  		s[i*2+1] = '-';
1295  	}
1296  
1297  	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1298  }
1299  
1300  #define KNL_EDC_ROUTE 0xb8
1301  #define KNL_MC_ROUTE 0xb4
1302  
1303  /* Is this dram rule backed by regular DRAM in flat mode? */
1304  #define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
1305  
1306  /* Is this dram rule cached? */
1307  #define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1308  
1309  /* Is this rule backed by edc ? */
1310  #define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1311  
1312  /* Is this rule backed by DRAM, cacheable in EDRAM? */
1313  #define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1314  
1315  /* Is this rule mod3? */
1316  #define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1317  
1318  /*
1319   * Figure out how big our RAM modules are.
1320   *
1321   * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
1322   * have to figure this out from the SAD rules, interleave lists, route tables,
1323   * and TAD rules.
1324   *
1325   * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
1326   * inspect the TAD rules to figure out how large the SAD regions really are.
1327   *
1328   * When we know the real size of a SAD region and how many ways it's
1329   * interleaved, we know the individual contribution of each channel to
1330   * TAD is size/ways.
1331   *
1332   * Finally, we have to check whether each channel participates in each SAD
1333   * region.
1334   *
1335   * Fortunately, KNL only supports one DIMM per channel, so once we know how
1336   * much memory the channel uses, we know the DIMM is at least that large.
1337   * (The BIOS might possibly choose not to map all available memory, in which
1338   * case we will underreport the size of the DIMM.)
1339   *
1340   * In theory, we could try to determine the EDC sizes as well, but that would
1341   * only work in flat mode, not in cache mode.
1342   *
1343   * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
1344   *            elements)
1345   */
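/*
 * For example, a 24GB SAD region that is interleaved 6 ways across all six
 * channels adds 4GB to each participating channel's estimated DIMM size.
 */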
1346  static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1347  {
1348  	u64 sad_base, sad_limit = 0;
1349  	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1350  	int sad_rule = 0;
1351  	int tad_rule = 0;
1352  	int intrlv_ways, tad_ways;
1353  	u32 first_pkg, pkg;
1354  	int i;
1355  	u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1356  	u32 dram_rule, interleave_reg;
1357  	u32 mc_route_reg[KNL_MAX_CHAS];
1358  	u32 edc_route_reg[KNL_MAX_CHAS];
1359  	int edram_only;
1360  	char edc_route_string[KNL_MAX_EDCS*2];
1361  	char mc_route_string[KNL_MAX_CHANNELS*2];
1362  	int cur_reg_start;
1363  	int mc;
1364  	int channel;
1365  	int participants[KNL_MAX_CHANNELS];
1366  
1367  	for (i = 0; i < KNL_MAX_CHANNELS; i++)
1368  		mc_sizes[i] = 0;
1369  
1370  	/* Read the EDC route table in each CHA. */
1371  	cur_reg_start = 0;
1372  	for (i = 0; i < KNL_MAX_CHAS; i++) {
1373  		pci_read_config_dword(pvt->knl.pci_cha[i],
1374  				KNL_EDC_ROUTE, &edc_route_reg[i]);
1375  
1376  		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1377  			knl_show_edc_route(edc_route_reg[i-1],
1378  					edc_route_string);
1379  			if (cur_reg_start == i-1)
1380  				edac_dbg(0, "edc route table for CHA %d: %s\n",
1381  					cur_reg_start, edc_route_string);
1382  			else
1383  				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1384  					cur_reg_start, i-1, edc_route_string);
1385  			cur_reg_start = i;
1386  		}
1387  	}
1388  	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1389  	if (cur_reg_start == i-1)
1390  		edac_dbg(0, "edc route table for CHA %d: %s\n",
1391  			cur_reg_start, edc_route_string);
1392  	else
1393  		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1394  			cur_reg_start, i-1, edc_route_string);
1395  
1396  	/* Read the MC route table in each CHA. */
1397  	cur_reg_start = 0;
1398  	for (i = 0; i < KNL_MAX_CHAS; i++) {
1399  		pci_read_config_dword(pvt->knl.pci_cha[i],
1400  			KNL_MC_ROUTE, &mc_route_reg[i]);
1401  
1402  		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1403  			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1404  			if (cur_reg_start == i-1)
1405  				edac_dbg(0, "mc route table for CHA %d: %s\n",
1406  					cur_reg_start, mc_route_string);
1407  			else
1408  				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1409  					cur_reg_start, i-1, mc_route_string);
1410  			cur_reg_start = i;
1411  		}
1412  	}
1413  	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1414  	if (cur_reg_start == i-1)
1415  		edac_dbg(0, "mc route table for CHA %d: %s\n",
1416  			cur_reg_start, mc_route_string);
1417  	else
1418  		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1419  			cur_reg_start, i-1, mc_route_string);
1420  
1421  	/* Process DRAM rules */
1422  	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1423  		/* previous limit becomes the new base */
1424  		sad_base = sad_limit;
1425  
1426  		pci_read_config_dword(pvt->pci_sad0,
1427  			pvt->info.dram_rule[sad_rule], &dram_rule);
1428  
1429  		if (!DRAM_RULE_ENABLE(dram_rule))
1430  			break;
1431  
1432  		edram_only = KNL_EDRAM_ONLY(dram_rule);
1433  
1434  		sad_limit = pvt->info.sad_limit(dram_rule)+1;
1435  
1436  		pci_read_config_dword(pvt->pci_sad0,
1437  			pvt->info.interleave_list[sad_rule], &interleave_reg);
1438  
1439  		/*
1440  		 * Find out how many ways this dram rule is interleaved.
1441  		 * We stop when we see the first channel again.
1442  		 */
1443  		first_pkg = sad_pkg(pvt->info.interleave_pkg,
1444  						interleave_reg, 0);
1445  		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1446  			pkg = sad_pkg(pvt->info.interleave_pkg,
1447  						interleave_reg, intrlv_ways);
1448  
1449  			if ((pkg & 0x8) == 0) {
1450  				/*
1451  				 * A cleared bit 3 means the memory is non-local,
1452  				 * which KNL doesn't support
1453  				 */
1454  				edac_dbg(0, "Unexpected interleave target %d\n",
1455  					pkg);
1456  				return -1;
1457  			}
1458  
1459  			if (pkg == first_pkg)
1460  				break;
1461  		}
1462  		if (KNL_MOD3(dram_rule))
1463  			intrlv_ways *= 3;
1464  
1465  		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1466  			sad_rule,
1467  			sad_base,
1468  			sad_limit,
1469  			intrlv_ways,
1470  			edram_only ? ", EDRAM" : "");
1471  
1472  		/*
1473  		 * Find out how big the SAD region really is by iterating
1474  		 * over TAD tables (SAD regions may contain holes).
1475  		 * Each memory controller might have a different TAD table, so
1476  		 * we have to look at both.
1477  		 *
1478  		 * Livespace is the memory that's mapped in this TAD table,
1479  		 * deadspace is the holes (this could be the MMIO hole, or it
1480  		 * could be memory that's mapped by the other TAD table but
1481  		 * not this one).
1482  		 */
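		/*
		 * Hypothetical example: if TAD rule 0 maps 0-2GB and TAD
		 * rule 1 maps 3GB-5GB (a 1GB hole in between), then while
		 * processing rule 1 tad_livespace is 2GB, the offset
		 * (deadspace) register reads 1GB, and tad_size below works
		 * out to 5GB - (2GB + 1GB) = 2GB.
		 */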
1483  		for (mc = 0; mc < 2; mc++) {
1484  			sad_actual_size[mc] = 0;
1485  			tad_livespace = 0;
1486  			for (tad_rule = 0;
1487  					tad_rule < ARRAY_SIZE(
1488  						knl_tad_dram_limit_lo);
1489  					tad_rule++) {
1490  				if (knl_get_tad(pvt,
1491  						tad_rule,
1492  						mc,
1493  						&tad_deadspace,
1494  						&tad_limit,
1495  						&tad_ways))
1496  					break;
1497  
1498  				tad_size = (tad_limit+1) -
1499  					(tad_livespace + tad_deadspace);
1500  				tad_livespace += tad_size;
1501  				tad_base = (tad_limit+1) - tad_size;
1502  
1503  				if (tad_base < sad_base) {
1504  					if (tad_limit > sad_base)
1505  						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1506  				} else if (tad_base < sad_limit) {
1507  					if (tad_limit+1 > sad_limit) {
1508  						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1509  					} else {
1510  						/* TAD region is completely inside SAD region */
1511  						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1512  							tad_rule, tad_base,
1513  							tad_limit, tad_size,
1514  							mc);
1515  						sad_actual_size[mc] += tad_size;
1516  					}
1517  				}
1518  			}
1519  		}
1520  
1521  		for (mc = 0; mc < 2; mc++) {
1522  			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1523  				mc, sad_actual_size[mc], sad_actual_size[mc]);
1524  		}
1525  
1526  		/* Ignore EDRAM rule */
1527  		if (edram_only)
1528  			continue;
1529  
1530  		/* Figure out which channels participate in interleave. */
1531  		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1532  			participants[channel] = 0;
1533  
1534  		/* For each channel, does at least one CHA have
1535  		 * this channel mapped to one of its route-table targets?
1536  		 */
1537  		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1538  			int target;
1539  			int cha;
1540  
1541  			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1542  				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1543  					if (knl_get_mc_route(target,
1544  						mc_route_reg[cha]) == channel
1545  						&& !participants[channel]) {
1546  						participants[channel] = 1;
1547  						break;
1548  					}
1549  				}
1550  			}
1551  		}
1552  
1553  		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1554  			mc = knl_channel_mc(channel);
1555  			if (participants[channel]) {
1556  				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1557  					channel,
1558  					sad_actual_size[mc]/intrlv_ways,
1559  					sad_rule);
1560  				mc_sizes[channel] +=
1561  					sad_actual_size[mc]/intrlv_ways;
1562  			}
1563  		}
1564  	}
1565  
1566  	return 0;
1567  }
1568  
1569  static void get_source_id(struct mem_ctl_info *mci)
1570  {
1571  	struct sbridge_pvt *pvt = mci->pvt_info;
1572  	u32 reg;
1573  
1574  	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1575  	    pvt->info.type == KNIGHTS_LANDING)
1576  		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1577  	else
1578  		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1579  
1580  	if (pvt->info.type == KNIGHTS_LANDING)
1581  		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1582  	else
1583  		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1584  }
1585  
1586  static int __populate_dimms(struct mem_ctl_info *mci,
1587  			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1588  			    enum edac_type mode)
1589  {
1590  	struct sbridge_pvt *pvt = mci->pvt_info;
1591  	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1592  							 : NUM_CHANNELS;
1593  	unsigned int i, j, banks, ranks, rows, cols, npages;
1594  	struct dimm_info *dimm;
1595  	enum mem_type mtype;
1596  	u64 size;
1597  
1598  	mtype = pvt->info.get_memory_type(pvt);
1599  	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1600  		edac_dbg(0, "Memory is registered\n");
1601  	else if (mtype == MEM_UNKNOWN)
1602  		edac_dbg(0, "Cannot determine memory type\n");
1603  	else
1604  		edac_dbg(0, "Memory is unregistered\n");
1605  
1606  	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1607  		banks = 16;
1608  	else
1609  		banks = 8;
1610  
1611  	for (i = 0; i < channels; i++) {
1612  		u32 mtr, amap = 0;
1613  
1614  		int max_dimms_per_channel;
1615  
1616  		if (pvt->info.type == KNIGHTS_LANDING) {
1617  			max_dimms_per_channel = 1;
1618  			if (!pvt->knl.pci_channel[i])
1619  				continue;
1620  		} else {
1621  			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1622  			if (!pvt->pci_tad[i])
1623  				continue;
1624  			pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap);
1625  		}
1626  
1627  		for (j = 0; j < max_dimms_per_channel; j++) {
1628  			dimm = edac_get_dimm(mci, i, j, 0);
1629  			if (pvt->info.type == KNIGHTS_LANDING) {
1630  				pci_read_config_dword(pvt->knl.pci_channel[i],
1631  					knl_mtr_reg, &mtr);
1632  			} else {
1633  				pci_read_config_dword(pvt->pci_tad[i],
1634  					mtr_regs[j], &mtr);
1635  			}
1636  			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1637  
1638  			if (IS_DIMM_PRESENT(mtr)) {
1639  				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1640  					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1641  						       pvt->sbridge_dev->source_id,
1642  						       pvt->sbridge_dev->dom, i);
1643  					return -ENODEV;
1644  				}
1645  				pvt->channel[i].dimms++;
1646  
1647  				ranks = numrank(pvt->info.type, mtr);
1648  
1649  				if (pvt->info.type == KNIGHTS_LANDING) {
1650  					/* For DDR4, this is fixed. */
1651  					cols = 1 << 10;
1652  					rows = knl_mc_sizes[i] /
1653  						((u64) cols * ranks * banks * 8);
1654  				} else {
1655  					rows = numrow(mtr);
1656  					cols = numcol(mtr);
1657  				}
1658  
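				/*
				 * The DIMM provides 8 bytes (64 data bits)
				 * per row/column/bank/rank address, so the
				 * capacity in MiB is
				 * (rows * cols * banks * ranks * 8) >> 20,
				 * i.e. the product below shifted by (20 - 3).
				 */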
1659  				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1660  				npages = MiB_TO_PAGES(size);
1661  
1662  				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1663  					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1664  					 size, npages,
1665  					 banks, ranks, rows, cols);
1666  
1667  				dimm->nr_pages = npages;
1668  				dimm->grain = 32;
1669  				dimm->dtype = pvt->info.get_width(pvt, mtr);
1670  				dimm->mtype = mtype;
1671  				dimm->edac_mode = mode;
1672  				pvt->channel[i].dimm[j].rowbits = order_base_2(rows);
1673  				pvt->channel[i].dimm[j].colbits = order_base_2(cols);
1674  				pvt->channel[i].dimm[j].bank_xor_enable =
1675  						GET_BITFIELD(pvt->info.mcmtr, 9, 9);
1676  				pvt->channel[i].dimm[j].amap_fine = GET_BITFIELD(amap, 0, 0);
1677  				snprintf(dimm->label, sizeof(dimm->label),
1678  						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1679  						 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1680  			}
1681  		}
1682  	}
1683  
1684  	return 0;
1685  }
1686  
1687  static int get_dimm_config(struct mem_ctl_info *mci)
1688  {
1689  	struct sbridge_pvt *pvt = mci->pvt_info;
1690  	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1691  	enum edac_type mode;
1692  	u32 reg;
1693  
1694  	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1695  	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1696  		 pvt->sbridge_dev->mc,
1697  		 pvt->sbridge_dev->node_id,
1698  		 pvt->sbridge_dev->source_id);
1699  
1700  	/* KNL doesn't support mirroring or lockstep,
1701  	 * and is always closed page
1702  	 */
1703  	if (pvt->info.type == KNIGHTS_LANDING) {
1704  		mode = EDAC_S4ECD4ED;
1705  		pvt->mirror_mode = NON_MIRRORING;
1706  		pvt->is_cur_addr_mirrored = false;
1707  
1708  		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1709  			return -1;
1710  		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1711  			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1712  			return -ENODEV;
1713  		}
1714  	} else {
1715  		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1716  			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1717  				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1718  				return -ENODEV;
1719  			}
1720  			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1721  			if (GET_BITFIELD(reg, 28, 28)) {
1722  				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1723  				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1724  				goto next;
1725  			}
1726  		}
1727  		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1728  			edac_dbg(0, "Failed to read RASENABLES register\n");
1729  			return -ENODEV;
1730  		}
1731  		if (IS_MIRROR_ENABLED(reg)) {
1732  			pvt->mirror_mode = FULL_MIRRORING;
1733  			edac_dbg(0, "Full memory mirroring is enabled\n");
1734  		} else {
1735  			pvt->mirror_mode = NON_MIRRORING;
1736  			edac_dbg(0, "Memory mirroring is disabled\n");
1737  		}
1738  
1739  next:
1740  		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1741  			edac_dbg(0, "Failed to read MCMTR register\n");
1742  			return -ENODEV;
1743  		}
1744  		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1745  			edac_dbg(0, "Lockstep is enabled\n");
1746  			mode = EDAC_S8ECD8ED;
1747  			pvt->is_lockstep = true;
1748  		} else {
1749  			edac_dbg(0, "Lockstep is disabled\n");
1750  			mode = EDAC_S4ECD4ED;
1751  			pvt->is_lockstep = false;
1752  		}
1753  		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1754  			edac_dbg(0, "address map is on closed page mode\n");
1755  			pvt->is_close_pg = true;
1756  		} else {
1757  			edac_dbg(0, "address map is on open page mode\n");
1758  			pvt->is_close_pg = false;
1759  		}
1760  	}
1761  
1762  	return __populate_dimms(mci, knl_mc_sizes, mode);
1763  }
1764  
1765  static void get_memory_layout(const struct mem_ctl_info *mci)
1766  {
1767  	struct sbridge_pvt *pvt = mci->pvt_info;
1768  	int i, j, k, n_sads, n_tads, sad_interl;
1769  	u32 reg;
1770  	u64 limit, prv = 0;
1771  	u64 tmp_mb;
1772  	u32 gb, mb;
1773  	u32 rir_way;
1774  
1775  	/*
1776  	 * Step 1) Get TOLM/TOHM ranges
1777  	 */
1778  
1779  	pvt->tolm = pvt->info.get_tolm(pvt);
1780  	tmp_mb = (1 + pvt->tolm) >> 20;
1781  
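	/* Split into whole GiB plus a MiB remainder; (mb * 1000) / 1024 prints the remainder as thousandths of a GiB. */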
1782  	gb = div_u64_rem(tmp_mb, 1024, &mb);
1783  	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1784  		gb, (mb*1000)/1024, (u64)pvt->tolm);
1785  
1786  	/* Address range is already 45:25 */
1787  	pvt->tohm = pvt->info.get_tohm(pvt);
1788  	tmp_mb = (1 + pvt->tohm) >> 20;
1789  
1790  	gb = div_u64_rem(tmp_mb, 1024, &mb);
1791  	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1792  		gb, (mb*1000)/1024, (u64)pvt->tohm);
1793  
1794  	/*
1795  	 * Step 2) Get SAD range and SAD Interleave list
1796  	 * TAD registers contain the interleave wayness. However, it
1797  	 * seems simpler to just discover it indirectly, with the
1798  	 * algorithm below.
1799  	 */
1800  	prv = 0;
1801  	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1802  		/* SAD_LIMIT Address range is 45:26 */
1803  		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1804  				      &reg);
1805  		limit = pvt->info.sad_limit(reg);
1806  
1807  		if (!DRAM_RULE_ENABLE(reg))
1808  			continue;
1809  
1810  		if (limit <= prv)
1811  			break;
1812  
1813  		tmp_mb = (limit + 1) >> 20;
1814  		gb = div_u64_rem(tmp_mb, 1024, &mb);
1815  		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1816  			 n_sads,
1817  			 show_dram_attr(pvt->info.dram_attr(reg)),
1818  			 gb, (mb*1000)/1024,
1819  			 ((u64)tmp_mb) << 20L,
1820  			 get_intlv_mode_str(reg, pvt->info.type),
1821  			 reg);
1822  		prv = limit;
1823  
1824  		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1825  				      &reg);
1826  		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1827  		for (j = 0; j < 8; j++) {
1828  			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1829  			if (j > 0 && sad_interl == pkg)
1830  				break;
1831  
1832  			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1833  				 n_sads, j, pkg);
1834  		}
1835  	}
1836  
1837  	if (pvt->info.type == KNIGHTS_LANDING)
1838  		return;
1839  
1840  	/*
1841  	 * Step 3) Get TAD range
1842  	 */
1843  	prv = 0;
1844  	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1845  		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1846  		limit = TAD_LIMIT(reg);
1847  		if (limit <= prv)
1848  			break;
1849  		tmp_mb = (limit + 1) >> 20;
1850  
1851  		gb = div_u64_rem(tmp_mb, 1024, &mb);
1852  		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1853  			 n_tads, gb, (mb*1000)/1024,
1854  			 ((u64)tmp_mb) << 20L,
1855  			 (u32)(1 << TAD_SOCK(reg)),
1856  			 (u32)TAD_CH(reg) + 1,
1857  			 (u32)TAD_TGT0(reg),
1858  			 (u32)TAD_TGT1(reg),
1859  			 (u32)TAD_TGT2(reg),
1860  			 (u32)TAD_TGT3(reg),
1861  			 reg);
1862  		prv = limit;
1863  	}
1864  
1865  	/*
1866  	 * Step 4) Get TAD offsets, per each channel
1867  	 */
1868  	for (i = 0; i < NUM_CHANNELS; i++) {
1869  		if (!pvt->channel[i].dimms)
1870  			continue;
1871  		for (j = 0; j < n_tads; j++) {
1872  			pci_read_config_dword(pvt->pci_tad[i],
1873  					      tad_ch_nilv_offset[j],
1874  					      &reg);
1875  			tmp_mb = TAD_OFFSET(reg) >> 20;
1876  			gb = div_u64_rem(tmp_mb, 1024, &mb);
1877  			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1878  				 i, j,
1879  				 gb, (mb*1000)/1024,
1880  				 ((u64)tmp_mb) << 20L,
1881  				 reg);
1882  		}
1883  	}
1884  
1885  	/*
1886  	 * Step 6) Get RIR Wayness/Limit, per each channel
1887  	 */
1888  	for (i = 0; i < NUM_CHANNELS; i++) {
1889  		if (!pvt->channel[i].dimms)
1890  			continue;
1891  		for (j = 0; j < MAX_RIR_RANGES; j++) {
1892  			pci_read_config_dword(pvt->pci_tad[i],
1893  					      rir_way_limit[j],
1894  					      &reg);
1895  
1896  			if (!IS_RIR_VALID(reg))
1897  				continue;
1898  
1899  			tmp_mb = pvt->info.rir_limit(reg) >> 20;
1900  			rir_way = 1 << RIR_WAY(reg);
1901  			gb = div_u64_rem(tmp_mb, 1024, &mb);
1902  			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1903  				 i, j,
1904  				 gb, (mb*1000)/1024,
1905  				 ((u64)tmp_mb) << 20L,
1906  				 rir_way,
1907  				 reg);
1908  
1909  			for (k = 0; k < rir_way; k++) {
1910  				pci_read_config_dword(pvt->pci_tad[i],
1911  						      rir_offset[j][k],
1912  						      &reg);
1913  				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1914  
1915  				gb = div_u64_rem(tmp_mb, 1024, &mb);
1916  				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1917  					 i, j, k,
1918  					 gb, (mb*1000)/1024,
1919  					 ((u64)tmp_mb) << 20L,
1920  					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1921  					 reg);
1922  			}
1923  		}
1924  	}
1925  }
1926  
1927  static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1928  {
1929  	struct sbridge_dev *sbridge_dev;
1930  
1931  	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1932  		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1933  			return sbridge_dev->mci;
1934  	}
1935  	return NULL;
1936  }
1937  
1938  static u8 sb_close_row[] = {
1939  	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
1940  };
1941  
1942  static u8 sb_close_column[] = {
1943  	3, 4, 5, 14, 19, 23, 24, 25, 26, 27
1944  };
1945  
1946  static u8 sb_open_row[] = {
1947  	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
1948  };
1949  
1950  static u8 sb_open_column[] = {
1951  	3, 4, 5, 6, 7, 8, 9, 10, 11, 12
1952  };
1953  
1954  static u8 sb_open_fine_column[] = {
1955  	3, 4, 5, 7, 8, 9, 10, 11, 12, 13
1956  };
1957  
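/* Gather the address bits at the positions listed in bits[] into a compact value; bits[0] becomes bit 0 of the result. */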
1958  static int sb_bits(u64 addr, int nbits, u8 *bits)
1959  {
1960  	int i, res = 0;
1961  
1962  	for (i = 0; i < nbits; i++)
1963  		res |= ((addr >> bits[i]) & 1) << i;
1964  	return res;
1965  }
1966  
1967  static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
1968  {
1969  	int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
1970  
1971  	if (do_xor)
1972  		ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
1973  
1974  	return ret;
1975  }
1976  
1977  static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank,
1978  			   u64 rank_addr, char *msg)
1979  {
1980  	int dimmno = 0;
1981  	int row, col, bank_address, bank_group;
1982  	struct sbridge_pvt *pvt;
1983  	u32 bg0 = 0, rowbits = 0, colbits = 0;
1984  	u32 amap_fine = 0, bank_xor_enable = 0;
1985  
1986  	dimmno = (rank < 12) ? rank / 4 : 2;
1987  	pvt = mci->pvt_info;
1988  	amap_fine =  pvt->channel[ch].dimm[dimmno].amap_fine;
1989  	bg0 = amap_fine ? 6 : 13;
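	/* bg0: rank-address bit feeding the low bank group bit in the open page decode; the fine address map moves it from bit 13 down to bit 6. */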
1990  	rowbits = pvt->channel[ch].dimm[dimmno].rowbits;
1991  	colbits = pvt->channel[ch].dimm[dimmno].colbits;
1992  	bank_xor_enable = pvt->channel[ch].dimm[dimmno].bank_xor_enable;
1993  
1994  	if (pvt->is_lockstep) {
1995  		pr_warn_once("LockStep row/column decode is not supported yet!\n");
1996  		msg[0] = '\0';
1997  		return false;
1998  	}
1999  
2000  	if (pvt->is_close_pg) {
2001  		row = sb_bits(rank_addr, rowbits, sb_close_row);
2002  		col = sb_bits(rank_addr, colbits, sb_close_column);
2003  		col |= 0x400; /* C10 is autoprecharge, always set */
2004  		bank_address = sb_bank_bits(rank_addr, 8, 9, bank_xor_enable, 22, 28);
2005  		bank_group = sb_bank_bits(rank_addr, 6, 7, bank_xor_enable, 20, 21);
2006  	} else {
2007  		row = sb_bits(rank_addr, rowbits, sb_open_row);
2008  		if (amap_fine)
2009  			col = sb_bits(rank_addr, colbits, sb_open_fine_column);
2010  		else
2011  			col = sb_bits(rank_addr, colbits, sb_open_column);
2012  		bank_address = sb_bank_bits(rank_addr, 18, 19, bank_xor_enable, 22, 23);
2013  		bank_group = sb_bank_bits(rank_addr, bg0, 17, bank_xor_enable, 20, 21);
2014  	}
2015  
2016  	row &= (1u << rowbits) - 1;
2017  
2018  	sprintf(msg, "row:0x%x col:0x%x bank_addr:%d bank_group:%d",
2019  		row, col, bank_address, bank_group);
2020  	return true;
2021  }
2022  
2023  static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank,
2024  			   u64 rank_addr, char *msg)
2025  {
2026  	pr_warn_once("DDR3 row/column decode not supported yet!\n");
2027  	msg[0] = '\0';
2028  	return false;
2029  }
2030  
2031  static int get_memory_error_data(struct mem_ctl_info *mci,
2032  				 u64 addr,
2033  				 u8 *socket, u8 *ha,
2034  				 long *channel_mask,
2035  				 u8 *rank,
2036  				 char **area_type, char *msg)
2037  {
2038  	struct mem_ctl_info	*new_mci;
2039  	struct sbridge_pvt *pvt = mci->pvt_info;
2040  	struct pci_dev		*pci_ha;
2041  	int			n_rir, n_sads, n_tads, sad_way, sck_xch;
2042  	int			sad_interl, idx, base_ch;
2043  	int			interleave_mode, shiftup = 0;
2044  	unsigned int		sad_interleave[MAX_INTERLEAVE];
2045  	u32			reg, dram_rule;
2046  	u8			ch_way, sck_way, pkg, sad_ha = 0, rankid = 0;
2047  	u32			tad_offset;
2048  	u32			rir_way;
2049  	u32			mb, gb;
2050  	u64			ch_addr, offset, limit = 0, prv = 0;
2051  	u64			rank_addr;
2052  	enum mem_type		mtype;
2053  
2054  	/*
2055  	 * Step 0) Check if the address is at special memory ranges
2056  	 * The check below is probably enough to cover all cases where
2057  	 * the error is not inside memory, except for the legacy
2058  	 * range (e.g. VGA addresses). It is unlikely, however, that the
2059  	 * memory controller would generate an error on that range.
2060  	 */
2061  	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
2062  		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
2063  		return -EINVAL;
2064  	}
2065  	if (addr >= (u64)pvt->tohm) {
2066  		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
2067  		return -EINVAL;
2068  	}
2069  
2070  	/*
2071  	 * Step 1) Get socket
2072  	 */
2073  	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
2074  		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
2075  				      &reg);
2076  
2077  		if (!DRAM_RULE_ENABLE(reg))
2078  			continue;
2079  
2080  		limit = pvt->info.sad_limit(reg);
2081  		if (limit <= prv) {
2082  			sprintf(msg, "Can't discover the memory socket");
2083  			return -EINVAL;
2084  		}
2085  		if  (addr <= limit)
2086  			break;
2087  		prv = limit;
2088  	}
2089  	if (n_sads == pvt->info.max_sad) {
2090  		sprintf(msg, "Can't discover the memory socket");
2091  		return -EINVAL;
2092  	}
2093  	dram_rule = reg;
2094  	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
2095  	interleave_mode = pvt->info.interleave_mode(dram_rule);
2096  
2097  	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
2098  			      &reg);
2099  
2100  	if (pvt->info.type == SANDY_BRIDGE) {
2101  		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
2102  		for (sad_way = 0; sad_way < 8; sad_way++) {
2103  			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
2104  			if (sad_way > 0 && sad_interl == pkg)
2105  				break;
2106  			sad_interleave[sad_way] = pkg;
2107  			edac_dbg(0, "SAD interleave #%d: %d\n",
2108  				 sad_way, sad_interleave[sad_way]);
2109  		}
2110  		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
2111  			 pvt->sbridge_dev->mc,
2112  			 n_sads,
2113  			 addr,
2114  			 limit,
2115  			 sad_way + 7,
2116  			 !interleave_mode ? "" : "XOR[18:16]");
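		/* 3-bit interleave index: address bits 8:6, optionally XORed with bits 18:16 */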
2117  		if (interleave_mode)
2118  			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
2119  		else
2120  			idx = (addr >> 6) & 7;
2121  		switch (sad_way) {
2122  		case 1:
2123  			idx = 0;
2124  			break;
2125  		case 2:
2126  			idx = idx & 1;
2127  			break;
2128  		case 4:
2129  			idx = idx & 3;
2130  			break;
2131  		case 8:
2132  			break;
2133  		default:
2134  			sprintf(msg, "Can't discover socket interleave");
2135  			return -EINVAL;
2136  		}
2137  		*socket = sad_interleave[idx];
2138  		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2139  			 idx, sad_way, *socket);
2140  	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2141  		int bits, a7mode = A7MODE(dram_rule);
2142  
2143  		if (a7mode) {
2144  			/* A7 mode swaps P9 with P6 */
2145  			bits = GET_BITFIELD(addr, 7, 8) << 1;
2146  			bits |= GET_BITFIELD(addr, 9, 9);
2147  		} else
2148  			bits = GET_BITFIELD(addr, 6, 8);
2149  
2150  		if (interleave_mode == 0) {
2151  			/* interleave mode will XOR {8,7,6} with {18,17,16} */
2152  			idx = GET_BITFIELD(addr, 16, 18);
2153  			idx ^= bits;
2154  		} else
2155  			idx = bits;
2156  
2157  		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2158  		*socket = sad_pkg_socket(pkg);
2159  		sad_ha = sad_pkg_ha(pkg);
2160  
2161  		if (a7mode) {
2162  			/* MCChanShiftUpEnable */
2163  			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2164  			shiftup = GET_BITFIELD(reg, 22, 22);
2165  		}
2166  
2167  		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2168  			 idx, *socket, sad_ha, shiftup);
2169  	} else {
2170  		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2171  		idx = (addr >> 6) & 7;
2172  		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2173  		*socket = sad_pkg_socket(pkg);
2174  		sad_ha = sad_pkg_ha(pkg);
2175  		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2176  			 idx, *socket, sad_ha);
2177  	}
2178  
2179  	*ha = sad_ha;
2180  
2181  	/*
2182  	 * Move to the proper node structure, in order to access the
2183  	 * right PCI registers
2184  	 */
2185  	new_mci = get_mci_for_node_id(*socket, sad_ha);
2186  	if (!new_mci) {
2187  		sprintf(msg, "Struct for socket #%u wasn't initialized",
2188  			*socket);
2189  		return -EINVAL;
2190  	}
2191  	mci = new_mci;
2192  	pvt = mci->pvt_info;
2193  
2194  	/*
2195  	 * Step 2) Get memory channel
2196  	 */
2197  	prv = 0;
2198  	pci_ha = pvt->pci_ha;
2199  	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2200  		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2201  		limit = TAD_LIMIT(reg);
2202  		if (limit <= prv) {
2203  			sprintf(msg, "Can't discover the memory channel");
2204  			return -EINVAL;
2205  		}
2206  		if  (addr <= limit)
2207  			break;
2208  		prv = limit;
2209  	}
2210  	if (n_tads == MAX_TAD) {
2211  		sprintf(msg, "Can't discover the memory channel");
2212  		return -EINVAL;
2213  	}
2214  
2215  	ch_way = TAD_CH(reg) + 1;
2216  	sck_way = TAD_SOCK(reg);
2217  
2218  	if (ch_way == 3)
2219  		idx = addr >> 6;
2220  	else {
2221  		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2222  		if (pvt->is_chan_hash)
2223  			idx = haswell_chan_hash(idx, addr);
2224  	}
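	/* Reduce the raw interleave index to one of the ch_way channel targets (TGT0..TGT3) below. */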
2225  	idx = idx % ch_way;
2226  
2227  	/*
2228  	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2229  	 */
2230  	switch (idx) {
2231  	case 0:
2232  		base_ch = TAD_TGT0(reg);
2233  		break;
2234  	case 1:
2235  		base_ch = TAD_TGT1(reg);
2236  		break;
2237  	case 2:
2238  		base_ch = TAD_TGT2(reg);
2239  		break;
2240  	case 3:
2241  		base_ch = TAD_TGT3(reg);
2242  		break;
2243  	default:
2244  		sprintf(msg, "Can't discover the TAD target");
2245  		return -EINVAL;
2246  	}
2247  	*channel_mask = 1 << base_ch;
2248  
2249  	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2250  
2251  	if (pvt->mirror_mode == FULL_MIRRORING ||
2252  	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
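		/* The mirrored copy of this address lives on the channel two positions away from the primary. */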
2253  		*channel_mask |= 1 << ((base_ch + 2) % 4);
2254  		switch(ch_way) {
2255  		case 2:
2256  		case 4:
2257  			sck_xch = (1 << sck_way) * (ch_way >> 1);
2258  			break;
2259  		default:
2260  			sprintf(msg, "Invalid mirror set. Can't decode addr");
2261  			return -EINVAL;
2262  		}
2263  
2264  		pvt->is_cur_addr_mirrored = true;
2265  	} else {
2266  		sck_xch = (1 << sck_way) * ch_way;
2267  		pvt->is_cur_addr_mirrored = false;
2268  	}
2269  
2270  	if (pvt->is_lockstep)
2271  		*channel_mask |= 1 << ((base_ch + 1) % 4);
2272  
2273  	offset = TAD_OFFSET(tad_offset);
2274  
2275  	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2276  		 n_tads,
2277  		 addr,
2278  		 limit,
2279  		 sck_way,
2280  		 ch_way,
2281  		 offset,
2282  		 idx,
2283  		 base_ch,
2284  		 *channel_mask);
2285  
2286  	/* Calculate channel address */
2287  	/* Remove the TAD offset */
2288  
2289  	if (offset > addr) {
2290  		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2291  			offset, addr);
2292  		return -EINVAL;
2293  	}
2294  
2295  	ch_addr = addr - offset;
2296  	ch_addr >>= (6 + shiftup);
2297  	ch_addr /= sck_xch;
2298  	ch_addr <<= (6 + shiftup);
2299  	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
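	/* ch_addr is now the channel-local address: the socket/channel interleave (sck_xch) has been divided out above bit (6 + shiftup), with the low bits preserved. */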
2300  
2301  	/*
2302  	 * Step 3) Decode rank
2303  	 */
2304  	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2305  		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2306  
2307  		if (!IS_RIR_VALID(reg))
2308  			continue;
2309  
2310  		limit = pvt->info.rir_limit(reg);
2311  		gb = div_u64_rem(limit >> 20, 1024, &mb);
2312  		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2313  			 n_rir,
2314  			 gb, (mb*1000)/1024,
2315  			 limit,
2316  			 1 << RIR_WAY(reg));
2317  		if  (ch_addr <= limit)
2318  			break;
2319  	}
2320  	if (n_rir == MAX_RIR_RANGES) {
2321  		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2322  			ch_addr);
2323  		return -EINVAL;
2324  	}
2325  	rir_way = RIR_WAY(reg);
2326  
2327  	if (pvt->is_close_pg)
2328  		idx = (ch_addr >> 6);
2329  	else
2330  		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
2331  	idx %= 1 << rir_way;
2332  
2333  	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2334  	*rank = RIR_RNK_TGT(pvt->info.type, reg);
2335  
2336  	if (pvt->info.type == BROADWELL) {
2337  		if (pvt->is_close_pg)
2338  			shiftup = 6;
2339  		else
2340  			shiftup = 13;
2341  
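		/*
		 * Strip the RIR interleaving from the channel address using the
		 * same divide-out trick as for ch_addr, then subtract this
		 * rank's RIR offset to get the rank-local address.
		 */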
2342  		rank_addr = ch_addr >> shiftup;
2343  		rank_addr /= (1 << rir_way);
2344  		rank_addr <<= shiftup;
2345  		rank_addr |= ch_addr & GENMASK_ULL(shiftup - 1, 0);
2346  		rank_addr -= RIR_OFFSET(pvt->info.type, reg);
2347  
2348  		mtype = pvt->info.get_memory_type(pvt);
2349  		rankid = *rank;
2350  		if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
2351  			sb_decode_ddr4(mci, base_ch, rankid, rank_addr, msg);
2352  		else
2353  			sb_decode_ddr3(mci, base_ch, rankid, rank_addr, msg);
2354  	} else {
2355  		msg[0] = '\0';
2356  	}
2357  
2358  	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2359  		 n_rir,
2360  		 ch_addr,
2361  		 limit,
2362  		 rir_way,
2363  		 idx);
2364  
2365  	return 0;
2366  }
2367  
2368  static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
2369  					  const struct mce *m, u8 *socket,
2370  					  u8 *ha, long *channel_mask,
2371  					  char *msg)
2372  {
2373  	u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
2374  	struct mem_ctl_info *new_mci;
2375  	struct sbridge_pvt *pvt;
2376  	struct pci_dev *pci_ha;
2377  	bool tad0;
2378  
2379  	if (channel >= NUM_CHANNELS) {
2380  		sprintf(msg, "Invalid channel 0x%x", channel);
2381  		return -EINVAL;
2382  	}
2383  
2384  	pvt = mci->pvt_info;
2385  	if (!pvt->info.get_ha) {
2386  		sprintf(msg, "No get_ha()");
2387  		return -EINVAL;
2388  	}
2389  	*ha = pvt->info.get_ha(m->bank);
2390  	if (*ha != 0 && *ha != 1) {
2391  		sprintf(msg, "Impossible bank %d", m->bank);
2392  		return -EINVAL;
2393  	}
2394  
2395  	*socket = m->socketid;
2396  	new_mci = get_mci_for_node_id(*socket, *ha);
2397  	if (!new_mci) {
2398  		strcpy(msg, "mci socket got corrupted!");
2399  		return -EINVAL;
2400  	}
2401  
2402  	pvt = new_mci->pvt_info;
2403  	pci_ha = pvt->pci_ha;
2404  	pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
2405  	tad0 = m->addr <= TAD_LIMIT(reg);
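	/* tad0: the address falls in the first TAD region, the only one covered by address-range partial mirroring. */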
2406  
2407  	*channel_mask = 1 << channel;
2408  	if (pvt->mirror_mode == FULL_MIRRORING ||
2409  	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
2410  		*channel_mask |= 1 << ((channel + 2) % 4);
2411  		pvt->is_cur_addr_mirrored = true;
2412  	} else {
2413  		pvt->is_cur_addr_mirrored = false;
2414  	}
2415  
2416  	if (pvt->is_lockstep)
2417  		*channel_mask |= 1 << ((channel + 1) % 4);
2418  
2419  	return 0;
2420  }
2421  
2422  /****************************************************************************
2423  	Device initialization routines: put/get, init/exit
2424   ****************************************************************************/
2425  
2426  /*
2427   *	sbridge_put_all_devices	'put' all the devices that we have
2428   *				reserved via 'get'
2429   */
2430  static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2431  {
2432  	int i;
2433  
2434  	edac_dbg(0, "\n");
2435  	for (i = 0; i < sbridge_dev->n_devs; i++) {
2436  		struct pci_dev *pdev = sbridge_dev->pdev[i];
2437  		if (!pdev)
2438  			continue;
2439  		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2440  			 pdev->bus->number,
2441  			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2442  		pci_dev_put(pdev);
2443  	}
2444  }
2445  
2446  static void sbridge_put_all_devices(void)
2447  {
2448  	struct sbridge_dev *sbridge_dev, *tmp;
2449  
2450  	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2451  		sbridge_put_devices(sbridge_dev);
2452  		free_sbridge_dev(sbridge_dev);
2453  	}
2454  }
2455  
2456  static int sbridge_get_onedevice(struct pci_dev **prev,
2457  				 u8 *num_mc,
2458  				 const struct pci_id_table *table,
2459  				 const unsigned devno,
2460  				 const int multi_bus)
2461  {
2462  	struct sbridge_dev *sbridge_dev = NULL;
2463  	const struct pci_id_descr *dev_descr = &table->descr[devno];
2464  	struct pci_dev *pdev = NULL;
2465  	int seg = 0;
2466  	u8 bus = 0;
2467  	int i = 0;
2468  
2469  	sbridge_printk(KERN_DEBUG,
2470  		"Seeking for: PCI ID %04x:%04x\n",
2471  		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2472  
2473  	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2474  			      dev_descr->dev_id, *prev);
2475  
2476  	if (!pdev) {
2477  		if (*prev) {
2478  			*prev = pdev;
2479  			return 0;
2480  		}
2481  
2482  		if (dev_descr->optional)
2483  			return 0;
2484  
2485  		/* if the HA wasn't found */
2486  		if (devno == 0)
2487  			return -ENODEV;
2488  
2489  		sbridge_printk(KERN_INFO,
2490  			"Device not found: %04x:%04x\n",
2491  			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2492  
2493  		/* End of list, leave */
2494  		return -ENODEV;
2495  	}
2496  	seg = pci_domain_nr(pdev->bus);
2497  	bus = pdev->bus->number;
2498  
2499  next_imc:
2500  	sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
2501  				      multi_bus, sbridge_dev);
2502  	if (!sbridge_dev) {
2503  		/* If HA1 wasn't found, don't create a second EDAC memory controller */
2504  		if (dev_descr->dom == IMC1 && devno != 1) {
2505  			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
2506  				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2507  			pci_dev_put(pdev);
2508  			return 0;
2509  		}
2510  
2511  		if (dev_descr->dom == SOCK)
2512  			goto out_imc;
2513  
2514  		sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
2515  		if (!sbridge_dev) {
2516  			pci_dev_put(pdev);
2517  			return -ENOMEM;
2518  		}
2519  		(*num_mc)++;
2520  	}
2521  
2522  	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2523  		sbridge_printk(KERN_ERR,
2524  			"Duplicated device for %04x:%04x\n",
2525  			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2526  		pci_dev_put(pdev);
2527  		return -ENODEV;
2528  	}
2529  
2530  	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2531  
2532  	/* pdev belongs to more than one IMC, do extra gets */
2533  	if (++i > 1)
2534  		pci_dev_get(pdev);
2535  
2536  	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2537  		goto next_imc;
2538  
2539  out_imc:
2540  	/* Be sure that the device is enabled */
2541  	if (unlikely(pci_enable_device(pdev) < 0)) {
2542  		sbridge_printk(KERN_ERR,
2543  			"Couldn't enable %04x:%04x\n",
2544  			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2545  		return -ENODEV;
2546  	}
2547  
2548  	edac_dbg(0, "Detected %04x:%04x\n",
2549  		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2550  
2551  	/*
2552  	 * As stated in drivers/pci/search.c, the reference count for
2553  	 * @from is always decremented if it is not %NULL. Since we need
2554  	 * to walk all devices up to NULL, we need to do an extra get on this device
2555  	 */
2556  	pci_dev_get(pdev);
2557  
2558  	*prev = pdev;
2559  
2560  	return 0;
2561  }
2562  
2563  /*
2564   * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2565   *			     devices we want to reference for this driver.
2566   * @num_mc: pointer to the memory controllers count, to be incremented in case
2567   *	    of success.
2568   * @table: model specific table
2569   *
2570   * returns 0 in case of success or error code
2571   */
2572  static int sbridge_get_all_devices(u8 *num_mc,
2573  					const struct pci_id_table *table)
2574  {
2575  	int i, rc;
2576  	struct pci_dev *pdev = NULL;
2577  	int allow_dups = 0;
2578  	int multi_bus = 0;
2579  
2580  	if (table->type == KNIGHTS_LANDING)
2581  		allow_dups = multi_bus = 1;
2582  	while (table && table->descr) {
2583  		for (i = 0; i < table->n_devs_per_sock; i++) {
2584  			if (!allow_dups || i == 0 ||
2585  					table->descr[i].dev_id !=
2586  						table->descr[i-1].dev_id) {
2587  				pdev = NULL;
2588  			}
2589  			do {
2590  				rc = sbridge_get_onedevice(&pdev, num_mc,
2591  							   table, i, multi_bus);
2592  				if (rc < 0) {
2593  					if (i == 0) {
2594  						i = table->n_devs_per_sock;
2595  						break;
2596  					}
2597  					sbridge_put_all_devices();
2598  					return -ENODEV;
2599  				}
2600  			} while (pdev && !allow_dups);
2601  		}
2602  		table++;
2603  	}
2604  
2605  	return 0;
2606  }
2607  
2608  /*
2609   * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
2610   * the format: XXXa. So we can convert from a device to the corresponding
2611   * channel like this
2612   */
2613  #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
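/* e.g. a TAD0 device ID ending in 0xa maps to channel 0, one ending in 0xd to channel 3 */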
2614  
2615  static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2616  				 struct sbridge_dev *sbridge_dev)
2617  {
2618  	struct sbridge_pvt *pvt = mci->pvt_info;
2619  	struct pci_dev *pdev;
2620  	u8 saw_chan_mask = 0;
2621  	int i;
2622  
2623  	for (i = 0; i < sbridge_dev->n_devs; i++) {
2624  		pdev = sbridge_dev->pdev[i];
2625  		if (!pdev)
2626  			continue;
2627  
2628  		switch (pdev->device) {
2629  		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2630  			pvt->pci_sad0 = pdev;
2631  			break;
2632  		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2633  			pvt->pci_sad1 = pdev;
2634  			break;
2635  		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2636  			pvt->pci_br0 = pdev;
2637  			break;
2638  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2639  			pvt->pci_ha = pdev;
2640  			break;
2641  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2642  			pvt->pci_ta = pdev;
2643  			break;
2644  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2645  			pvt->pci_ras = pdev;
2646  			break;
2647  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2648  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2649  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2650  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2651  		{
2652  			int id = TAD_DEV_TO_CHAN(pdev->device);
2653  			pvt->pci_tad[id] = pdev;
2654  			saw_chan_mask |= 1 << id;
2655  		}
2656  			break;
2657  		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2658  			pvt->pci_ddrio = pdev;
2659  			break;
2660  		default:
2661  			goto error;
2662  		}
2663  
2664  		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2665  			 pdev->vendor, pdev->device,
2666  			 sbridge_dev->bus,
2667  			 pdev);
2668  	}
2669  
2670  	/* Check if everything was registered */
2671  	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2672  	    !pvt->pci_ras || !pvt->pci_ta)
2673  		goto enodev;
2674  
2675  	if (saw_chan_mask != 0x0f)
2676  		goto enodev;
2677  	return 0;
2678  
2679  enodev:
2680  	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2681  	return -ENODEV;
2682  
2683  error:
2684  	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2685  		       PCI_VENDOR_ID_INTEL, pdev->device);
2686  	return -EINVAL;
2687  }
2688  
2689  static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2690  				 struct sbridge_dev *sbridge_dev)
2691  {
2692  	struct sbridge_pvt *pvt = mci->pvt_info;
2693  	struct pci_dev *pdev;
2694  	u8 saw_chan_mask = 0;
2695  	int i;
2696  
2697  	for (i = 0; i < sbridge_dev->n_devs; i++) {
2698  		pdev = sbridge_dev->pdev[i];
2699  		if (!pdev)
2700  			continue;
2701  
2702  		switch (pdev->device) {
2703  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2704  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2705  			pvt->pci_ha = pdev;
2706  			break;
2707  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2708  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2709  			pvt->pci_ta = pdev;
2710  			break;
2711  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2712  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2713  			pvt->pci_ras = pdev;
2714  			break;
2715  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2716  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2717  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2718  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2719  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2720  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2721  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2722  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2723  		{
2724  			int id = TAD_DEV_TO_CHAN(pdev->device);
2725  			pvt->pci_tad[id] = pdev;
2726  			saw_chan_mask |= 1 << id;
2727  		}
2728  			break;
2729  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2730  			pvt->pci_ddrio = pdev;
2731  			break;
2732  		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2733  			pvt->pci_ddrio = pdev;
2734  			break;
2735  		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2736  			pvt->pci_sad0 = pdev;
2737  			break;
2738  		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2739  			pvt->pci_br0 = pdev;
2740  			break;
2741  		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2742  			pvt->pci_br1 = pdev;
2743  			break;
2744  		default:
2745  			goto error;
2746  		}
2747  
2748  		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2749  			 sbridge_dev->bus,
2750  			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2751  			 pdev);
2752  	}
2753  
2754  	/* Check if everything was registered */
2755  	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2756  	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2757  		goto enodev;
2758  
2759  	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2760  	    saw_chan_mask != 0x03)   /* -EP */
2761  		goto enodev;
2762  	return 0;
2763  
2764  enodev:
2765  	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2766  	return -ENODEV;
2767  
2768  error:
2769  	sbridge_printk(KERN_ERR,
2770  		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2771  			pdev->device);
2772  	return -EINVAL;
2773  }
2774  
2775  static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2776  				 struct sbridge_dev *sbridge_dev)
2777  {
2778  	struct sbridge_pvt *pvt = mci->pvt_info;
2779  	struct pci_dev *pdev;
2780  	u8 saw_chan_mask = 0;
2781  	int i;
2782  
2783  	/* there's only one device per system; not tied to any bus */
2784  	if (pvt->info.pci_vtd == NULL)
2785  		/* result will be checked later */
2786  		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2787  						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2788  						   NULL);
2789  
2790  	for (i = 0; i < sbridge_dev->n_devs; i++) {
2791  		pdev = sbridge_dev->pdev[i];
2792  		if (!pdev)
2793  			continue;
2794  
2795  		switch (pdev->device) {
2796  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2797  			pvt->pci_sad0 = pdev;
2798  			break;
2799  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2800  			pvt->pci_sad1 = pdev;
2801  			break;
2802  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2803  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2804  			pvt->pci_ha = pdev;
2805  			break;
2806  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2807  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2808  			pvt->pci_ta = pdev;
2809  			break;
2810  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2811  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2812  			pvt->pci_ras = pdev;
2813  			break;
2814  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2815  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2816  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2817  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2818  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2819  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2820  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2821  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2822  		{
2823  			int id = TAD_DEV_TO_CHAN(pdev->device);
2824  			pvt->pci_tad[id] = pdev;
2825  			saw_chan_mask |= 1 << id;
2826  		}
2827  			break;
2828  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2829  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2830  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2831  		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2832  			if (!pvt->pci_ddrio)
2833  				pvt->pci_ddrio = pdev;
2834  			break;
2835  		default:
2836  			break;
2837  		}
2838  
2839  		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2840  			 sbridge_dev->bus,
2841  			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2842  			 pdev);
2843  	}
2844  
2845  	/* Check if everything was registered */
2846  	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2847  	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2848  		goto enodev;
2849  
2850  	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2851  	    saw_chan_mask != 0x03)   /* -EP */
2852  		goto enodev;
2853  	return 0;
2854  
2855  enodev:
2856  	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2857  	return -ENODEV;
2858  }
2859  
2860  static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2861  				 struct sbridge_dev *sbridge_dev)
2862  {
2863  	struct sbridge_pvt *pvt = mci->pvt_info;
2864  	struct pci_dev *pdev;
2865  	u8 saw_chan_mask = 0;
2866  	int i;
2867  
2868  	/* there's only one device per system; not tied to any bus */
2869  	if (pvt->info.pci_vtd == NULL)
2870  		/* result will be checked later */
2871  		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2872  						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2873  						   NULL);
2874  
2875  	for (i = 0; i < sbridge_dev->n_devs; i++) {
2876  		pdev = sbridge_dev->pdev[i];
2877  		if (!pdev)
2878  			continue;
2879  
2880  		switch (pdev->device) {
2881  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2882  			pvt->pci_sad0 = pdev;
2883  			break;
2884  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2885  			pvt->pci_sad1 = pdev;
2886  			break;
2887  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2888  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2889  			pvt->pci_ha = pdev;
2890  			break;
2891  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2892  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2893  			pvt->pci_ta = pdev;
2894  			break;
2895  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2896  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2897  			pvt->pci_ras = pdev;
2898  			break;
2899  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2900  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2901  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2902  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2903  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2904  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2905  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2906  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2907  		{
2908  			int id = TAD_DEV_TO_CHAN(pdev->device);
2909  			pvt->pci_tad[id] = pdev;
2910  			saw_chan_mask |= 1 << id;
2911  		}
2912  			break;
2913  		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2914  			pvt->pci_ddrio = pdev;
2915  			break;
2916  		default:
2917  			break;
2918  		}
2919  
2920  		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2921  			 sbridge_dev->bus,
2922  			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2923  			 pdev);
2924  	}
2925  
2926  	/* Check if everything was registered */
2927  	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2928  	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2929  		goto enodev;
2930  
2931  	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2932  	    saw_chan_mask != 0x03)   /* -EP */
2933  		goto enodev;
2934  	return 0;
2935  
2936  enodev:
2937  	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2938  	return -ENODEV;
2939  }
2940  
2941  static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2942  			struct sbridge_dev *sbridge_dev)
2943  {
2944  	struct sbridge_pvt *pvt = mci->pvt_info;
2945  	struct pci_dev *pdev;
2946  	int dev, func;
2947  
2948  	int i;
2949  	int devidx;
2950  
2951  	for (i = 0; i < sbridge_dev->n_devs; i++) {
2952  		pdev = sbridge_dev->pdev[i];
2953  		if (!pdev)
2954  			continue;
2955  
2956  		/* Extract PCI device and function. */
2957  		dev = (pdev->devfn >> 3) & 0x1f;
2958  		func = pdev->devfn & 0x7;
2959  
2960  		switch (pdev->device) {
2961  		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2962  			if (dev == 8)
2963  				pvt->knl.pci_mc0 = pdev;
2964  			else if (dev == 9)
2965  				pvt->knl.pci_mc1 = pdev;
2966  			else {
2967  				sbridge_printk(KERN_ERR,
2968  					"Memory controller in unexpected place! (dev %d, fn %d)\n",
2969  					dev, func);
2970  				continue;
2971  			}
2972  			break;
2973  
2974  		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2975  			pvt->pci_sad0 = pdev;
2976  			break;
2977  
2978  		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2979  			pvt->pci_sad1 = pdev;
2980  			break;
2981  
2982  		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2983  			/* There is one of these per tile, and they range from
2984  			 * 1.14.0 to 1.18.5.
2985  			 */
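			/* Linearize (device, function): CHA devices start at PCI device 14 and each device spans 8 function slots in the index. */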
2986  			devidx = ((dev-14)*8)+func;
2987  
2988  			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2989  				sbridge_printk(KERN_ERR,
2990  					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2991  					dev, func);
2992  				continue;
2993  			}
2994  
2995  			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2996  
2997  			pvt->knl.pci_cha[devidx] = pdev;
2998  			break;
2999  
3000  		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
3001  			devidx = -1;
3002  
3003  			/*
3004  			 *  MC0 channels 0-2 are device 9 function 2-4,
3005  			 *  MC1 channels 3-5 are device 8 function 2-4.
3006  			 */
3007  
3008  			if (dev == 9)
3009  				devidx = func-2;
3010  			else if (dev == 8)
3011  				devidx = 3 + (func-2);
3012  
3013  			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
3014  				sbridge_printk(KERN_ERR,
3015  					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
3016  					dev, func);
3017  				continue;
3018  			}
3019  
3020  			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
3021  			pvt->knl.pci_channel[devidx] = pdev;
3022  			break;
3023  
3024  		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
3025  			pvt->knl.pci_mc_info = pdev;
3026  			break;
3027  
3028  		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
3029  			pvt->pci_ta = pdev;
3030  			break;
3031  
3032  		default:
3033  			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
3034  				pdev->device);
3035  			break;
3036  		}
3037  	}
3038  
3039  	if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
3040  	    !pvt->pci_sad0     || !pvt->pci_sad1    ||
3041  	    !pvt->pci_ta) {
3042  		goto enodev;
3043  	}
3044  
3045  	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
3046  		if (!pvt->knl.pci_channel[i]) {
3047  			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
3048  			goto enodev;
3049  		}
3050  	}
3051  
3052  	for (i = 0; i < KNL_MAX_CHAS; i++) {
3053  		if (!pvt->knl.pci_cha[i]) {
3054  			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
3055  			goto enodev;
3056  		}
3057  	}
3058  
3059  	return 0;
3060  
3061  enodev:
3062  	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
3063  	return -ENODEV;
3064  }
3065  
3066  /****************************************************************************
3067  			Error check routines
3068   ****************************************************************************/
3069  
3070  /*
3071   * While Sandy Bridge has error count registers, the SMI BIOS reads values
3072   * from them and resets the counters, so they are not reliable for the OS
3073   * to read. We have no option but to trust whatever the MCE is telling us
3074   * about the errors.
3075   */
3076  static void sbridge_mce_output_error(struct mem_ctl_info *mci,
3077  				    const struct mce *m)
3078  {
3079  	struct mem_ctl_info *new_mci;
3080  	struct sbridge_pvt *pvt = mci->pvt_info;
3081  	enum hw_event_mc_err_type tp_event;
3082  	char *optype, msg[256], msg_full[512];
3083  	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
3084  	bool overflow = GET_BITFIELD(m->status, 62, 62);
3085  	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
3086  	bool recoverable;
3087  	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
3088  	u32 mscod = GET_BITFIELD(m->status, 16, 31);
3089  	u32 errcode = GET_BITFIELD(m->status, 0, 15);
3090  	u32 channel = GET_BITFIELD(m->status, 0, 3);
3091  	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
3092  	/*
3093  	 * Bits 5-0 of MCi_MISC give the least significant bit that is valid.
3094  	 * A value of 6 means a cache-line-aligned address, a value of 12 means
3095  	 * a page-aligned address as reported by the patrol scrubber.
3096  	 */
3097  	u32 lsb = GET_BITFIELD(m->misc, 0, 5);
3098  	long channel_mask, first_channel;
3099  	u8  rank = 0xff, socket, ha;
3100  	int rc, dimm;
3101  	char *area_type = "DRAM";
3102  
3103  	if (pvt->info.type != SANDY_BRIDGE)
3104  		recoverable = true;
3105  	else
3106  		recoverable = GET_BITFIELD(m->status, 56, 56);
3107  
3108  	if (uncorrected_error) {
3109  		core_err_cnt = 1;
3110  		if (ripv) {
3111  			tp_event = HW_EVENT_ERR_UNCORRECTED;
3112  		} else {
3113  			tp_event = HW_EVENT_ERR_FATAL;
3114  		}
3115  	} else {
3116  		tp_event = HW_EVENT_ERR_CORRECTED;
3117  	}
3118  
3119  	/*
3120  	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
3121  	 * memory errors should fit in this mask:
3122  	 *	000f 0000 1mmm cccc (binary)
3123  	 * where:
3124  	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
3125  	 *	    won't be shown
3126  	 *	mmm = error type
3127  	 *	cccc = channel
3128  	 * If the mask doesn't match, report an error to the parsing logic
3129  	 */
3130  	switch (optypenum) {
3131  	case 0:
3132  		optype = "generic undef request error";
3133  		break;
3134  	case 1:
3135  		optype = "memory read error";
3136  		break;
3137  	case 2:
3138  		optype = "memory write error";
3139  		break;
3140  	case 3:
3141  		optype = "addr/cmd error";
3142  		break;
3143  	case 4:
3144  		optype = "memory scrubbing error";
3145  		break;
3146  	default:
3147  		optype = "reserved";
3148  		break;
3149  	}
3150  
3151  	if (pvt->info.type == KNIGHTS_LANDING) {
3152  		if (channel == 14) {
3153  			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
3154  				overflow ? " OVERFLOW" : "",
3155  				(uncorrected_error && recoverable)
3156  				? " recoverable" : "",
3157  				mscod, errcode,
3158  				m->bank);
3159  		} else {
3160  			char A = 'A';
3161  
3162  			/*
3163  			 * The reported channel is in the range 0-2, so we can't
3164  			 * map it back to the MC directly. To figure out the MC we
3165  			 * check the machine check bank that reported this error:
3166  			 * bank 15 means mc0 and bank 16 means mc1.
3167  			 */
3168  			channel = knl_channel_remap(m->bank == 16, channel);
3169  			channel_mask = 1 << channel;
3170  
3171  			snprintf(msg, sizeof(msg),
3172  				"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
3173  				overflow ? " OVERFLOW" : "",
3174  				(uncorrected_error && recoverable)
3175  				? " recoverable" : " ",
3176  				mscod, errcode, channel, A + channel);
3177  			edac_mc_handle_error(tp_event, mci, core_err_cnt,
3178  				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3179  				channel, 0, -1,
3180  				optype, msg);
3181  		}
3182  		return;
3183  	} else if (lsb < 12) {
3184  		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
3185  					   &channel_mask, &rank,
3186  					   &area_type, msg);
3187  	} else {
3188  		rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
3189  						    &channel_mask, msg);
3190  	}
3191  
3192  	if (rc < 0)
3193  		goto err_parsing;
3194  	new_mci = get_mci_for_node_id(socket, ha);
3195  	if (!new_mci) {
3196  		strcpy(msg, "Error: socket got corrupted!");
3197  		goto err_parsing;
3198  	}
3199  	mci = new_mci;
3200  	pvt = mci->pvt_info;
3201  
3202  	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
3203  
3204  	if (rank == 0xff)
3205  		dimm = -1;
3206  	else if (rank < 4)
3207  		dimm = 0;
3208  	else if (rank < 8)
3209  		dimm = 1;
3210  	else
3211  		dimm = 2;
3212  
3213  	/*
3214  	 * FIXME: On some memory configurations (mirror, lockstep), the
3215  	 * Memory Controller can't point the error to a single DIMM. The
3216  	 * EDAC core should be handling the channel mask, in order to point
3217  	 * to the group of DIMMs where the error may be happening.
3218  	 */
3219  	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3220  		channel = first_channel;
3221  	snprintf(msg_full, sizeof(msg_full),
3222  		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s",
3223  		 overflow ? " OVERFLOW" : "",
3224  		 (uncorrected_error && recoverable) ? " recoverable" : "",
3225  		 area_type,
3226  		 mscod, errcode,
3227  		 socket, ha,
3228  		 channel_mask,
3229  		 rank, msg);
3230  
3231  	edac_dbg(0, "%s\n", msg_full);
3232  
3233  	/* FIXME: need support for channel mask */
3234  
3235  	if (channel == CHANNEL_UNSPECIFIED)
3236  		channel = -1;
3237  
3238  	/* Call the helper to output message */
3239  	edac_mc_handle_error(tp_event, mci, core_err_cnt,
3240  			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3241  			     channel, dimm, -1,
3242  			     optype, msg_full);
3243  	return;
3244  err_parsing:
3245  	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3246  			     -1, -1, -1,
3247  			     msg, "");
3248  
3249  }
3250  
3251  /*
3252   * Check that logging is enabled and that this is the right type
3253   * of error for us to handle.
3254   */
3255  static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3256  				   void *data)
3257  {
3258  	struct mce *mce = (struct mce *)data;
3259  	struct mem_ctl_info *mci;
3260  	char *type;
3261  
3262  	if (mce->kflags & MCE_HANDLED_CEC)
3263  		return NOTIFY_DONE;
3264  
3265  	/*
3266  	 * Just let mcelog handle it if the error is
3267  	 * outside the memory controller. A memory error
3268  	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3269  	 * Bit 12 has a special meaning.
3270  	 */
3271  	if ((mce->status & 0xefff) >> 7 != 1)
3272  		return NOTIFY_DONE;
3273  
3274  	/* Check ADDRV bit in STATUS */
3275  	if (!GET_BITFIELD(mce->status, 58, 58))
3276  		return NOTIFY_DONE;
3277  
3278  	/* Check MISCV bit in STATUS */
3279  	if (!GET_BITFIELD(mce->status, 59, 59))
3280  		return NOTIFY_DONE;
3281  
3282  	/* Check address type in MISC (physical address only) */
3283  	if (GET_BITFIELD(mce->misc, 6, 8) != 2)
3284  		return NOTIFY_DONE;
3285  
3286  	mci = get_mci_for_node_id(mce->socketid, IMC0);
3287  	if (!mci)
3288  		return NOTIFY_DONE;
3289  
3290  	if (mce->mcgstatus & MCG_STATUS_MCIP)
3291  		type = "Exception";
3292  	else
3293  		type = "Event";
3294  
3295  	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3296  
3297  	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3298  			  "Bank %d: %016Lx\n", mce->extcpu, type,
3299  			  mce->mcgstatus, mce->bank, mce->status);
3300  	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3301  	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3302  	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3303  
3304  	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3305  			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3306  			  mce->time, mce->socketid, mce->apicid);
3307  
3308  	sbridge_mce_output_error(mci, mce);
3309  
3310  	/* Advise mcelog that the error was handled */
3311  	mce->kflags |= MCE_HANDLED_EDAC;
3312  	return NOTIFY_OK;
3313  }
3314  
3315  static struct notifier_block sbridge_mce_dec = {
3316  	.notifier_call	= sbridge_mce_check_error,
3317  	.priority	= MCE_PRIO_EDAC,
3318  };
3319  
3320  /****************************************************************************
3321  			EDAC register/unregister logic
3322   ****************************************************************************/
3323  
3324  static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3325  {
3326  	struct mem_ctl_info *mci = sbridge_dev->mci;
3327  
3328  	if (unlikely(!mci || !mci->pvt_info)) {
3329  		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3330  
3331  		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3332  		return;
3333  	}
3334  
3335  	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3336  		 mci, &sbridge_dev->pdev[0]->dev);
3337  
3338  	/* Remove MC sysfs nodes */
3339  	edac_mc_del_mc(mci->pdev);
3340  
3341  	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3342  	kfree(mci->ctl_name);
3343  	edac_mc_free(mci);
3344  	sbridge_dev->mci = NULL;
3345  }
3346  
3347  static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3348  {
3349  	struct mem_ctl_info *mci;
3350  	struct edac_mc_layer layers[2];
3351  	struct sbridge_pvt *pvt;
3352  	struct pci_dev *pdev = sbridge_dev->pdev[0];
3353  	int rc;
3354  
3355  	/* allocate a new MC control structure */
3356  	layers[0].type = EDAC_MC_LAYER_CHANNEL;
3357  	layers[0].size = type == KNIGHTS_LANDING ?
3358  		KNL_MAX_CHANNELS : NUM_CHANNELS;
3359  	layers[0].is_virt_csrow = false;
3360  	layers[1].type = EDAC_MC_LAYER_SLOT;
3361  	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3362  	layers[1].is_virt_csrow = true;
3363  	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3364  			    sizeof(*pvt));
3365  
3366  	if (unlikely(!mci))
3367  		return -ENOMEM;
3368  
3369  	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3370  		 mci, &pdev->dev);
3371  
3372  	pvt = mci->pvt_info;
3373  	memset(pvt, 0, sizeof(*pvt));
3374  
3375  	/* Associate sbridge_dev and mci for future usage */
3376  	pvt->sbridge_dev = sbridge_dev;
3377  	sbridge_dev->mci = mci;
3378  
3379  	mci->mtype_cap = type == KNIGHTS_LANDING ?
3380  		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3381  	mci->edac_ctl_cap = EDAC_FLAG_NONE;
3382  	mci->edac_cap = EDAC_FLAG_NONE;
3383  	mci->mod_name = EDAC_MOD_STR;
3384  	mci->dev_name = pci_name(pdev);
3385  	mci->ctl_page_to_phys = NULL;
3386  
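	/*
	 * Hook up the per-generation address-decode helpers; the SAD,
	 * TAD and RIR register layouts differ between CPU families.
	 */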
3387  	pvt->info.type = type;
3388  	switch (type) {
3389  	case IVY_BRIDGE:
3390  		pvt->info.rankcfgr = IB_RANK_CFG_A;
3391  		pvt->info.get_tolm = ibridge_get_tolm;
3392  		pvt->info.get_tohm = ibridge_get_tohm;
3393  		pvt->info.dram_rule = ibridge_dram_rule;
3394  		pvt->info.get_memory_type = get_memory_type;
3395  		pvt->info.get_node_id = get_node_id;
3396  		pvt->info.get_ha = ibridge_get_ha;
3397  		pvt->info.rir_limit = rir_limit;
3398  		pvt->info.sad_limit = sad_limit;
3399  		pvt->info.interleave_mode = interleave_mode;
3400  		pvt->info.dram_attr = dram_attr;
3401  		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3402  		pvt->info.interleave_list = ibridge_interleave_list;
3403  		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3404  		pvt->info.get_width = ibridge_get_width;
3405  
3406  		/* Store pci devices at mci for faster access */
3407  		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3408  		if (unlikely(rc < 0))
3409  			goto fail0;
3410  		get_source_id(mci);
3411  		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3412  			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3413  		break;
3414  	case SANDY_BRIDGE:
3415  		pvt->info.rankcfgr = SB_RANK_CFG_A;
3416  		pvt->info.get_tolm = sbridge_get_tolm;
3417  		pvt->info.get_tohm = sbridge_get_tohm;
3418  		pvt->info.dram_rule = sbridge_dram_rule;
3419  		pvt->info.get_memory_type = get_memory_type;
3420  		pvt->info.get_node_id = get_node_id;
3421  		pvt->info.get_ha = sbridge_get_ha;
3422  		pvt->info.rir_limit = rir_limit;
3423  		pvt->info.sad_limit = sad_limit;
3424  		pvt->info.interleave_mode = interleave_mode;
3425  		pvt->info.dram_attr = dram_attr;
3426  		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3427  		pvt->info.interleave_list = sbridge_interleave_list;
3428  		pvt->info.interleave_pkg = sbridge_interleave_pkg;
3429  		pvt->info.get_width = sbridge_get_width;
3430  
3431  		/* Store pci devices at mci for faster access */
3432  		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3433  		if (unlikely(rc < 0))
3434  			goto fail0;
3435  		get_source_id(mci);
3436  		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3437  			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3438  		break;
3439  	case HASWELL:
3440  		/* rankcfgr isn't used */
3441  		pvt->info.get_tolm = haswell_get_tolm;
3442  		pvt->info.get_tohm = haswell_get_tohm;
3443  		pvt->info.dram_rule = ibridge_dram_rule;
3444  		pvt->info.get_memory_type = haswell_get_memory_type;
3445  		pvt->info.get_node_id = haswell_get_node_id;
3446  		pvt->info.get_ha = ibridge_get_ha;
3447  		pvt->info.rir_limit = haswell_rir_limit;
3448  		pvt->info.sad_limit = sad_limit;
3449  		pvt->info.interleave_mode = interleave_mode;
3450  		pvt->info.dram_attr = dram_attr;
3451  		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3452  		pvt->info.interleave_list = ibridge_interleave_list;
3453  		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3454  		pvt->info.get_width = ibridge_get_width;
3455  
3456  		/* Store pci devices at mci for faster access */
3457  		rc = haswell_mci_bind_devs(mci, sbridge_dev);
3458  		if (unlikely(rc < 0))
3459  			goto fail0;
3460  		get_source_id(mci);
3461  		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3462  			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3463  		break;
3464  	case BROADWELL:
3465  		/* rankcfgr isn't used */
3466  		pvt->info.get_tolm = haswell_get_tolm;
3467  		pvt->info.get_tohm = haswell_get_tohm;
3468  		pvt->info.dram_rule = ibridge_dram_rule;
3469  		pvt->info.get_memory_type = haswell_get_memory_type;
3470  		pvt->info.get_node_id = haswell_get_node_id;
3471  		pvt->info.get_ha = ibridge_get_ha;
3472  		pvt->info.rir_limit = haswell_rir_limit;
3473  		pvt->info.sad_limit = sad_limit;
3474  		pvt->info.interleave_mode = interleave_mode;
3475  		pvt->info.dram_attr = dram_attr;
3476  		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3477  		pvt->info.interleave_list = ibridge_interleave_list;
3478  		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3479  		pvt->info.get_width = broadwell_get_width;
3480  
3481  		/* Store pci devices at mci for faster access */
3482  		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3483  		if (unlikely(rc < 0))
3484  			goto fail0;
3485  		get_source_id(mci);
3486  		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3487  			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3488  		break;
3489  	case KNIGHTS_LANDING:
3490  		/* pvt->info.rankcfgr == ??? */
3491  		pvt->info.get_tolm = knl_get_tolm;
3492  		pvt->info.get_tohm = knl_get_tohm;
3493  		pvt->info.dram_rule = knl_dram_rule;
3494  		pvt->info.get_memory_type = knl_get_memory_type;
3495  		pvt->info.get_node_id = knl_get_node_id;
3496  		pvt->info.get_ha = knl_get_ha;
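		/* KNL does not go through the common RIR translation helper. */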
3497  		pvt->info.rir_limit = NULL;
3498  		pvt->info.sad_limit = knl_sad_limit;
3499  		pvt->info.interleave_mode = knl_interleave_mode;
3500  		pvt->info.dram_attr = dram_attr_knl;
3501  		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3502  		pvt->info.interleave_list = knl_interleave_list;
3503  		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3504  		pvt->info.get_width = knl_get_width;
3505  
3506  		rc = knl_mci_bind_devs(mci, sbridge_dev);
3507  		if (unlikely(rc < 0))
3508  			goto fail0;
3509  		get_source_id(mci);
3510  		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3511  			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3512  		break;
3513  	}
3514  
3515  	if (!mci->ctl_name) {
3516  		rc = -ENOMEM;
3517  		goto fail0;
3518  	}
3519  
3520  	/* Get dimm basic config and the memory layout */
3521  	rc = get_dimm_config(mci);
3522  	if (rc < 0) {
3523  		edac_dbg(0, "MC: failed to get_dimm_config()\n");
3524  		goto fail;
3525  	}
3526  	get_memory_layout(mci);
3527  
3528  	/* record ptr to the generic device */
3529  	mci->pdev = &pdev->dev;
3530  
3531  	/* add this new MC control structure to EDAC's list of MCs */
3532  	if (unlikely(edac_mc_add_mc(mci))) {
3533  		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3534  		rc = -EINVAL;
3535  		goto fail;
3536  	}
3537  
3538  	return 0;
3539  
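	/*
	 * Error unwinding: "fail" frees the control name allocated above,
	 * "fail0" is used before that point.
	 */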
3540  fail:
3541  	kfree(mci->ctl_name);
3542  fail0:
3543  	edac_mc_free(mci);
3544  	sbridge_dev->mci = NULL;
3545  	return rc;
3546  }
3547  
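/* Map each supported CPU model to the PCI device table used to probe it. */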
3548  static const struct x86_cpu_id sbridge_cpuids[] = {
3549  	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
3550  	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,	  &pci_dev_descr_ibridge_table),
3551  	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,	  &pci_dev_descr_haswell_table),
3552  	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,	  &pci_dev_descr_broadwell_table),
3553  	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,	  &pci_dev_descr_broadwell_table),
3554  	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,  &pci_dev_descr_knl_table),
3555  	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,  &pci_dev_descr_knl_table),
3556  	{ }
3557  };
3558  MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3559  
3560  /*
3561   *	sbridge_probe	Get all devices and register the memory
3562   *			controllers that are present.
3563   *	return:
3564   *		0 if at least one device was found
3565   *		< 0 on error
3566   */
3567  
3568  static int sbridge_probe(const struct x86_cpu_id *id)
3569  {
3570  	int rc;
3571  	u8 mc, num_mc = 0;
3572  	struct sbridge_dev *sbridge_dev;
3573  	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3574  
3575  	/* get the pci devices we want to reserve for our use */
3576  	rc = sbridge_get_all_devices(&num_mc, ptable);
3577  
3578  	if (unlikely(rc < 0)) {
3579  		edac_dbg(0, "couldn't get all devices\n");
3580  		goto fail0;
3581  	}
3582  
3583  	mc = 0;
3584  
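	/* Register one EDAC MC instance per memory controller found above. */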
3585  	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3586  		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3587  			 mc, mc + 1, num_mc);
3588  
3589  		sbridge_dev->mc = mc++;
3590  		rc = sbridge_register_mci(sbridge_dev, ptable->type);
3591  		if (unlikely(rc < 0))
3592  			goto fail1;
3593  	}
3594  
3595  	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3596  
3597  	return 0;
3598  
3599  fail1:
3600  	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3601  		sbridge_unregister_mci(sbridge_dev);
3602  
3603  	sbridge_put_all_devices();
3604  fail0:
3605  	return rc;
3606  }
3607  
3608  /*
3609   *	sbridge_remove	Unregister all memory controllers and release
3610   *			the reserved PCI devices.
3611   */
3612  static void sbridge_remove(void)
3613  {
3614  	struct sbridge_dev *sbridge_dev;
3615  
3616  	edac_dbg(0, "\n");
3617  
3618  	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3619  		sbridge_unregister_mci(sbridge_dev);
3620  
3621  	/* Release PCI resources */
3622  	sbridge_put_all_devices();
3623  }
3624  
3625  /*
3626   *	sbridge_init		Module entry function
3627   *			Probe for supported devices and register them
3628   */
3629  static int __init sbridge_init(void)
3630  {
3631  	const struct x86_cpu_id *id;
3632  	const char *owner;
3633  	int rc;
3634  
3635  	edac_dbg(2, "\n");
3636  
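	/* Defer to a firmware-first (GHES) EDAC driver if one owns the hardware. */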
3637  	if (ghes_get_devices())
3638  		return -EBUSY;
3639  
3640  	owner = edac_get_owner();
3641  	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3642  		return -EBUSY;
3643  
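	/* The uncore PCI devices this driver needs are not exposed to guests. */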
3644  	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
3645  		return -ENODEV;
3646  
3647  	id = x86_match_cpu(sbridge_cpuids);
3648  	if (!id)
3649  		return -ENODEV;
3650  
3651  	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
3652  	opstate_init();
3653  
3654  	rc = sbridge_probe(id);
3655  
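	/* Hook into the MCE decode chain only after the MCs are registered. */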
3656  	if (rc >= 0) {
3657  		mce_register_decode_chain(&sbridge_mce_dec);
3658  		return 0;
3659  	}
3660  
3661  	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3662  		      rc);
3663  
3664  	return rc;
3665  }
3666  
3667  /*
3668   *	sbridge_exit()	Module exit function
3669   *			Unregister the driver
3670   */
3671  static void __exit sbridge_exit(void)
3672  {
3673  	edac_dbg(2, "\n");
3674  	sbridge_remove();
3675  	mce_unregister_decode_chain(&sbridge_mce_dec);
3676  }
3677  
3678  module_init(sbridge_init);
3679  module_exit(sbridge_exit);
3680  
3681  module_param(edac_op_state, int, 0444);
3682  MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3683  
3684  MODULE_LICENSE("GPL");
3685  MODULE_AUTHOR("Mauro Carvalho Chehab");
3686  MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
3687  MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3688  		   SBRIDGE_REVISION);
3689